From: David Scott Date: Wed, 26 Jan 2011 17:39:05 +0000 (+0000) Subject: Remove all mutable state within the database layer, leaving one single global referen... X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=647b71a00b48dc44a3cf2fa49536443d5c1513a6;p=xcp%2Fxen-api.git Remove all mutable state within the database layer, leaving one single global reference (to the master's single "database"). Allow the type-safe Db.* API to be used on more than one database at a time, by adding the "current database" to Context.t. Add a notion of database callbacks which are used by xapi for both the redo-log(s) and the event system. Signed-off-by: David Scott --- diff --git a/ocaml/database/OMakefile b/ocaml/database/OMakefile index 0460194d..e3a94492 100644 --- a/ocaml/database/OMakefile +++ b/ocaml/database/OMakefile @@ -19,8 +19,8 @@ BLOCK_DEVICE_IO_FILES = \ OCamlProgram(block_device_io, $(BLOCK_DEVICE_IO_FILES)) OCamlDocProgram(block_device_io, $(BLOCK_DEVICE_IO_FILES)) -DATABASE_SERVER_FILES = database_server_main ../autogen/db_actions -DATABASE_TEST_FILES = database_test +DATABASE_SERVER_FILES = database_server_main database_test ../autogen/db_actions +DATABASE_TEST_FILES = database_test database_test_main section: #XXX there are lots of interdependencies which we should be aim to remove OCAML_LIBS += ../util/version ../idl/ocaml_backend/common ../idl/ocaml_backend/client ../util/stats ../idl/ocaml_backend/server diff --git a/ocaml/database/backend_xml.ml b/ocaml/database/backend_xml.ml index efcd13a0..478e9c7e 100644 --- a/ocaml/database/backend_xml.ml +++ b/ocaml/database/backend_xml.ml @@ -19,23 +19,10 @@ open Db_cache_types open Db_backend open Pervasiveext -(** Given a fresh cache, update ref/uuid/name_label indices *) -let update_index tables = - iter_over_tables - (fun name table -> - iter_over_rows - (fun rf row -> - let uuid = lookup_field_in_row row uuid_fname in - let name_label = try Some (lookup_field_in_row row name_label_fname) with _ -> None in - add_ref_to_table_map rf name; - Ref_index.insert {Ref_index.name_label = name_label; Ref_index.uuid = uuid; Ref_index._ref = rf} - ) table - ) tables - -let unmarshall dbconn = +let unmarshall schema dbconn = let filename = dbconn.Parse_db_conf.path in if not dbconn.Parse_db_conf.compress - then Db_xml.From.file filename + then Db_xml.From.file schema filename else let compressed = Unix.openfile filename [ Unix.O_RDONLY ] 0o0 in finally @@ -43,7 +30,7 @@ let unmarshall dbconn = let result = ref None in Gzip.decompress_passive compressed (fun uncompressed -> - result := Some (Db_xml.From.channel (Unix.in_channel_of_descr uncompressed)) + result := Some (Db_xml.From.channel schema (Unix.in_channel_of_descr uncompressed)) ); match !result with | None -> failwith "unmarshal failure" @@ -52,143 +39,53 @@ let unmarshall dbconn = (fun () -> Unix.close compressed) (* Given table name, read all rows from db and store in cache *) -let populate_and_read_manifest dbconn = +let populate schema dbconn = Printf.printf "attempting to restore database from %s\n" dbconn.Parse_db_conf.path; - let manifest, unmarshalled_db = unmarshall dbconn in - debug "database unmarshalled, schema version = %d.%d" manifest.Db_cache_types.schema_major_vsn manifest.Db_cache_types.schema_minor_vsn; + let db = unmarshall schema dbconn in + let major, minor = Manifest.schema (Database.manifest db) in + debug "database unmarshalled, schema version = %d.%d" major minor; (* version_check manifest; *) - update_index unmarshalled_db; - iter_over_tables (fun name 
table -> set_table_in_cache cache name table) unmarshalled_db; - Db_cache_types.set_schema_vsn cache (manifest.Db_cache_types.schema_major_vsn, manifest.Db_cache_types.schema_minor_vsn); - - manifest - -let populate dbconn = - ignore (populate_and_read_manifest dbconn) - -let atomically_write_to_db_file filename marshall = - let tmp_file = Filenameext.temp_file_in_dir filename in - try - debug "writing db as xml to file '%s'." filename; - marshall tmp_file; - Unix.rename tmp_file filename - with e -> (debug "Exception writing db xml: %s" (Printexc.to_string e); log_backtrace(); - Unix.unlink tmp_file; (* make sure we don't leak temp files *) - raise e) - -(* Write the given database to the redo-log *) -let flush_db_to_redo_log cache_to_flush = - if Redo_log.is_enabled () then begin - (* Atomically read the generation count and take a deep copy of the cache *) - let cache_copy = Db_lock.with_lock (fun () -> Db_cache_types.snapshot cache_to_flush) in - debug "Flushing cache to redo-log"; - let db_cache_manifest = Db_cache_types.manifest_of_cache cache_to_flush in - let write_db_to_fd = (fun out_fd -> Db_xml.To.fd out_fd (db_cache_manifest, cache_copy)) in - Redo_log.write_db (Db_cache_types.generation_of_cache cache_copy) write_db_to_fd - end + db (* atomically flush entire db cache to disk. If we are given a cache then flush that, otherwise flush the current state of the global in-memory cache *) -(* Flush the database to the specified connection. This fn is responsible for ensuring that - (i) the db lock is held in order to guarantee an atomic snapshot of the database is flushed; - and (ii) that the db lock is _not_ held whilst writing to remote storage. - - For simplicity we enforce these 2 constraints by taking an entire snapshot of the db with the - global lock held and then flushing this snapshot to disk. However, this process is optimised - so that we only snapshot when writing to _remote storage_; in the local storage case we hold - the db_lock whilst writing. 
-*) -let flush_common dbconn optional_cache = - let time = Unix.gettimeofday() in +let flush dbconn db = + let time = Unix.gettimeofday() in - let do_flush cache_to_flush filename = - flush_db_to_redo_log cache_to_flush; - let db_cache_manifest = Db_cache_types.manifest_of_cache cache in - if not dbconn.Parse_db_conf.compress - then Db_xml.To.file filename (db_cache_manifest, cache_to_flush) - else - let compressed = Unix.openfile filename [ Unix.O_WRONLY; Unix.O_CREAT; Unix.O_TRUNC ] 0o0 in - finally - (fun () -> Gzip.compress compressed (fun uncompressed -> Db_xml.To.fd uncompressed (db_cache_manifest, cache_to_flush))) - (fun () -> Unix.close compressed) in + let do_flush_xml db filename = + Redo_log.flush_db_to_redo_log db; + Unixext.atomic_write_to_file filename 0o0644 + (fun fd -> + if not dbconn.Parse_db_conf.compress + then Db_xml.To.fd fd db + else + Gzip.compress fd + (fun uncompressed -> Db_xml.To.fd uncompressed db) + ) in - let marshall filename = - match optional_cache with - (* if we're flushing global db cache then snapshot with db_lock if we're writing to remote storage; - otherwise don't snapshot and do entire flush to local storage whilst holding db_lock *) - None -> - if dbconn.Parse_db_conf.is_on_remote_storage then (* writing to remote storage -- can't hold db_lock during flush *) - let snapshot_of_global_cache = Db_lock.with_lock (fun () -> snapshot cache) in - do_flush snapshot_of_global_cache filename - else (* writing to local storage -- just hold db_lock and flush directly from global cache (saving memory for snapshot in common case) *) - Db_lock.with_lock (fun () -> do_flush cache filename) - (* if we were given a specific cache to flush then just do it; no locks required *) - | Some c -> do_flush c filename in - let filename = dbconn.Parse_db_conf.path in - let generation_filename = Generation.filename dbconn in - atomically_write_to_db_file filename marshall; - Unixext.write_string_to_file generation_filename (Int64.to_string (Db_cache_types.generation_of_cache cache)); - debug "XML backend [%s] -- Write buffer flushed. Time: %f" filename (Unix.gettimeofday() -. time) + let do_flush_gen db filename = + let generation = Manifest.generation (Database.manifest db) in + Unixext.write_string_to_file filename (Generation.to_string generation) in + let filename = dbconn.Parse_db_conf.path in + do_flush_xml db filename; + let generation_filename = Parse_db_conf.generation_filename dbconn in + do_flush_gen db generation_filename; -(* We don't do any incremental flushing in this backend - we just check if any tables are dirty; if so we - flush everything and reset dirty status to clean *) + debug "XML backend [%s] -- Write buffer flushed. Time: %f" filename (Unix.gettimeofday() -. time) -(* Since this fn is not called with the db_lock we have to take this lock explicitly when accessing shared db state - -- e.g. the global tables that record what's dirty per database connection -*) -let flush_dirty dbconn = - (* is there anything for me to flush? *) - let sql_table_names_to_flush = - Db_lock.with_lock (* paranoia.. can almost certainly get away without taking db lock here.. 
*) - (fun () -> - List.filter (fun tbl_name->(Hashtbl.find table_persist_options tbl_name)<>Datamodel_types.PersistNothing) db_table_names) in - let anything_dirty = - Db_lock.with_lock (* definitely need db lock here becuase we're accessing shared db state *) - (fun () -> - List.fold_left (fun acc x -> acc || x) false (List.map (fun tbl -> Db_dirty.read_my_dirty_table_status dbconn tbl) sql_table_names_to_flush)) in - let clear_all_dirty_flags () = - Db_lock.with_lock (* definitely need db lock here because we're accessing shared db state *) - (fun () -> - List.iter - (fun tbl_name -> - Db_dirty.clear_my_dirty_table_status dbconn tbl_name; - let tbl = lookup_table_in_cache cache tbl_name in - let rows = get_rowlist tbl in - List.iter - (fun row -> - let objref = lookup_field_in_row row reference_fname in - Db_dirty.clear_my_row_dirty_status dbconn objref) - rows - ) - sql_table_names_to_flush - ) in - if anything_dirty then - begin - flush_common dbconn None; - clear_all_dirty_flags() - end; - anything_dirty (* return true if we did some flushing, false otherwise *) -let force_flush_all dbconn optional_cache = - flush_common dbconn optional_cache - -(* Does nothing. This is just a hook (left over from the sqlite days) in case we - * ever want to be notified of an object's deletion. *) -let notify_delete dbconn tblname objref = - () +(* NB We don't do incremental flushing *) -let read_schema_vsn dbconn = - (* inefficient to read whole db file just to parse schema vsn; but since this fn is - only called at startup I don't think we care.. *) - let manifest, unmarshalled_db = unmarshall dbconn in - manifest.Db_cache_types.schema_major_vsn, manifest.Db_cache_types.schema_minor_vsn - -let create_empty_db (major, minor) dbconn = - let empty_cache = create_empty_cache () in - Db_cache_types.set_schema_vsn empty_cache (major, minor); +let flush_dirty dbconn = + let db = get_database () in + let g = Manifest.generation (Database.manifest db) in + if g > dbconn.Parse_db_conf.last_generation_count then begin + flush dbconn db; + dbconn.Parse_db_conf.last_generation_count <- g; + true + end else false - List.iter (fun tbl -> set_table_in_cache empty_cache tbl (create_empty_table ())) db_table_names; - flush_common dbconn (Some empty_cache) + diff --git a/ocaml/database/backend_xml.mli b/ocaml/database/backend_xml.mli index 38b3469f..4d371e4f 100644 --- a/ocaml/database/backend_xml.mli +++ b/ocaml/database/backend_xml.mli @@ -11,11 +11,10 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
*) -val populate: Parse_db_conf.db_connection -> unit + +open Db_cache_types + +val populate: Schema.t -> Parse_db_conf.db_connection -> Database.t val flush_dirty: Parse_db_conf.db_connection -> bool -val force_flush_all: Parse_db_conf.db_connection -> Db_cache_types.cache option -> unit -val read_schema_vsn: Parse_db_conf.db_connection -> int*int -val create_empty_db: int*int -> Parse_db_conf.db_connection -> unit -val populate_and_read_manifest: Parse_db_conf.db_connection -> Db_cache_types.db_dump_manifest -val notify_delete: Parse_db_conf.db_connection -> string -> string -> unit -val flush_db_to_redo_log: Db_cache_types.cache -> unit +val flush: Parse_db_conf.db_connection -> Database.t -> unit + diff --git a/ocaml/database/database_server_main.ml b/ocaml/database/database_server_main.ml index ec2f0d35..fa9529fd 100644 --- a/ocaml/database/database_server_main.ml +++ b/ocaml/database/database_server_main.ml @@ -30,14 +30,18 @@ let remote_database_access_handler_v2 req bio = flush stdout; raise e +module Local_tests = Database_test.Tests(Db_cache_impl) + let _ = let listen_path = ref "./database" in + let self_test = ref false in Printexc.record_backtrace true; Arg.parse [ "--slave-of", Arg.String (fun master -> mode := Some(Slave master)), "run as a slave of a remote db"; "--master", Arg.String (fun db -> mode := Some(Master db)), "run as a master from the given db filename"; "--listen-on", Arg.Set_string listen_path, Printf.sprintf "listen for requests on path (default %s)" !listen_path; + "--test", Arg.Set self_test, "Run unit tests in-process"; ] (fun x -> Printf.fprintf stderr "Ignoring unknown parameter: %s\n%!" x) "run a stand-alone database server"; @@ -48,17 +52,12 @@ let _ = | Slave _ -> failwith "unimplemented" | Master db_filename -> Printf.printf "Database path: %s\n%!" 
db_filename; - let db = { Parse_db_conf.dummy_conf with - Parse_db_conf.path = db_filename - } in + let db = Parse_db_conf.make db_filename in Db_conn_store.initialise_db_connections [ db ]; - Printf.printf "About to create new dbs\n%!"; - List.iter (Db_connections.maybe_create_new_db (0,0)) (Db_conn_store.read_db_connections()); - Printf.printf "dbs created\n%!"; Db_cache.set_master true; - Db_dirty.make_blank_dirty_records(); - Db_cache_impl.initialise (); + Db_cache_impl.make [ db ] (Schema.of_datamodel ()); + Db_cache_impl.sync [ db ] (Db_backend.get_database ()); Unixext.unlink_safe !listen_path; let sockaddr = Unix.ADDR_UNIX !listen_path in @@ -68,6 +67,11 @@ let _ = Http_svr.add_handler Http.Post "/post_remote_db_access_v2" (Http_svr.BufIO remote_database_access_handler_v2); let server = Http_svr.start (socket, "http") in Printf.printf "server listening\n%!"; + if !self_test then begin + Printf.printf "Running unit-tests\n%!"; + Local_tests.main true; + Printf.printf "All tests passed\n%!"; + end; (* Wait for either completion *) Mutex.execute m (fun () -> diff --git a/ocaml/database/database_test.ml b/ocaml/database/database_test.ml index e626bb9c..c73dddd9 100644 --- a/ocaml/database/database_test.ml +++ b/ocaml/database/database_test.ml @@ -1,426 +1,509 @@ -(* Supported operations: *) - -let path = ref "./database" - -let rpc_common url content_type request = - let version = "1.1" in - let content_length = String.length request in - let headers = [ - Printf.sprintf "POST %s HTTP/%s" url version; - Printf.sprintf "User-Agent: xapi/%s" Xapi_globs.api_version_string; - "Content-Type: text/json"; - Printf.sprintf "Content-length: %d" content_length; - ] in - Xmlrpcclient.do_http_rpc "" 0 headers ~unixsock:(Some (!path)) request - (fun content_length _ fd -> - let buffer = String.make content_length '\000' in - Unixext.really_read fd buffer 0 content_length; - buffer) - -module Client_v1 = Db_rpc_client_v1.Make(struct - let initialise () = () - let rpc request = rpc_common "/post_remote_db_access" "text/xml" request -end) - -module Client_v2 = Db_rpc_client_v2.Make(struct - let initialise () = () - let rpc request = rpc_common "/post_remote_db_access_v2" "text/json" request -end) - -module Client = Client_v2 - -let name = "thevmname" -let invalid_name = "notavmname" - -let make_vm r uuid = - [ - "ref", r; +(* + * Copyright (C) 2010 Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ *) + + +module Tests = functor(Client: Db_interface.DB_ACCESS) -> struct + + let name = "thevmname" + let invalid_name = "notavmname" + + let make_vm r uuid = + [ +(* "ref", r; *) + "uuid", uuid; + "memory__static_max", "0"; + "memory__overhead", "0"; + "PV__ramdisk", ""; + "is_control_domain", "false"; + "actions__after_crash", "restart"; + "resident_on", "OpaqueRef:NULL"; + "snapshot_info", "()"; + "PCI_bus", ""; + "PV__args", ""; + "last_boot_CPU_flags", "()"; + "memory__target", "536870912"; + "is_a_template", "true"; + "user_version", "1"; + "HVM__shadow_multiplier", "1"; + "affinity", "OpaqueRef:NULL"; + "name__description", ""; + "PV__legacy_args", ""; + "parent", "OpaqueRef:NULL"; + "snapshot_metadata", ""; + "memory__dynamic_max", "0"; + "ha_always_run", "false"; + "other_config", "()"; + "PV__bootloader_args" ,""; + "VCPUs__at_startup", "1"; + "bios_strings", "()"; + "actions__after_shutdown", "destroy"; + "blocked_operations", "()"; + "tags", "()"; + "PV__kernel", ""; + "name__label", name; + "is_a_snapshot", "false"; + "VCPUs__params", "()"; + "VCPUs__max", "1"; + "allowed_operations", "()"; +(* "protection_policy", "OpaqueRef:NULL"; *) (* test of default *) + "memory__static_min", "268435456"; + "domid", "-1"; + "power_state", "Halted"; + "HVM__boot_policy", ""; + "ha_restart_priority", ""; + "suspend_VDI", "OpaqueRef:NULL"; + "HVM__boot_params", "()"; + "PV__bootloader", "eliloader"; + "transportable_snapshot_id", ""; + "snapshot_of", "OpaqueRef:NULL"; + "guest_metrics", "OpaqueRef:NULL"; + "platform", "()"; + "scheduled_to_be_resident_on", "OpaqueRef:NULL"; + "is_snapshot_from_vmpp", "false"; + "current_operations", "()"; + "recommendations", ""; + "last_booted_record", ""; + "blobs", "()"; + "domarch", ""; + "memory__dynamic_min", "0"; + "metrics", "OpaqueRef:NULL"; + "actions__after_reboot", "restart"; + "xenstore_data", "()"; + "snapshot_time", "19700101T00:00:00Z" + ] + + let make_vbd vm r uuid = [ +(* "ref", r; *) + "qos__supported_algorithms", "()"; + "other_config", "(('owner' ''))"; "uuid", uuid; - "memory__static_max", "0"; - "memory__overhead", "0"; - "PV__ramdisk", ""; - "is_control_domain", "false"; - "actions__after_crash", "restart"; - "resident_on", "OpaqueRef:NULL"; - "snapshot_info", "()"; - "PCI_bus", ""; - "PV__args", ""; - "last_boot_CPU_flags", "()"; - "memory__target", "536870912"; - "is_a_template", "true"; - "user_version", "1"; - "HVM__shadow_multiplier", "1"; - "affinity", "OpaqueRef:NULL"; - "name__description", ""; - "PV__legacy_args", ""; - "parent", "OpaqueRef:NULL"; - "snapshot_metadata", ""; - "memory__dynamic_max", "0"; - "ha_always_run", "false"; - "other_config", "()"; - "PV__bootloader_args" ,""; - "VCPUs__at_startup", "1"; - "bios_strings", "()"; - "actions__after_shutdown", "destroy"; - "blocked_operations", "()"; - "tags", "()"; - "PV__kernel", ""; - "name__label", name; - "is_a_snapshot", "false"; - "VCPUs__params", "()"; - "VCPUs__max", "1"; - "allowed_operations", "()"; - "protection_policy", "OpaqueRef:NULL"; - "memory__static_min", "268435456"; - "domid", "-1"; - "power_state", "Halted"; - "HVM__boot_policy", ""; - "ha_restart_priority", ""; - "suspend_VDI", "OpaqueRef:NULL"; - "HVM__boot_params", "()"; - "PV__bootloader", "eliloader"; - "transportable_snapshot_id", ""; - "snapshot_of", "OpaqueRef:NULL"; - "guest_metrics", "OpaqueRef:NULL"; - "platform", "()"; - "scheduled_to_be_resident_on", "OpaqueRef:NULL"; - "is_snapshot_from_vmpp", "false"; - "current_operations", "()"; - "recommendations", ""; - "last_booted_record", ""; - 
"blobs", "()"; - "domarch", ""; - "memory__dynamic_min", "0"; + "allowed_operations", "('attach')"; + "qos__algorithm_params", "()"; + "type", "Disk"; + "VM", vm; + "VDI", "OpaqueRef:NULL"; + "qos__algorithm_type", ""; "metrics", "OpaqueRef:NULL"; - "actions__after_reboot", "restart"; - "xenstore_data", "()"; - "snapshot_time", "19700101T00:00:00Z" + "device", ""; + "empty", "false"; + "bootable", "false"; + "current_operations", "()"; + "unpluggable", "true"; + "status_detail", ""; + "runtime_properties", "()"; + "userdevice", "0"; + "mode", "RW"; + "storage_lock", "false"; + "status_code", "0"; + "currently_attached", "false"; ] + + let expect_missing_row tbl r f = + try + f () + with Db_exn.DBCache_NotFound("missing row", tbl', r') when tbl' = tbl && r = r' -> () -let make_vbd vm r uuid = [ - "ref", r; - "qos__supported_algorithms", "()"; - "other_config", "(('owner' ''))"; - "uuid", uuid; - "allowed_operations", "('attach')"; - "qos__algorithm_params", "()"; - "type", "Disk"; - "VM", vm; - "VDI", "OpaqueRef:NULL"; - "qos__algorithm_type", ""; - "metrics", "OpaqueRef:NULL"; - "device", ""; - "empty", "false"; - "bootable", "false"; - "current_operations", "()"; - "unpluggable", "true"; - "status_detail", ""; - "runtime_properties", "()"; - "userdevice", "0"; - "mode", "RW"; - "storage_lock", "false"; - "status_code", "0"; - "currently_attached", "false"; -] - -let expect_missing_row tbl r f = - try - f () - with Db_exn.DBCache_NotFound("missing row", tbl', r') when tbl' = tbl && r = r' -> () - -let expect_missing_tbl tbl f = - try - f () - with Db_exn.DBCache_NotFound("missing table", tbl', "") when tbl' = tbl -> () - -let expect_uniqueness_violation tbl fld v f = - try - f () - with Db_exn.Uniqueness_constraint_violation(tbl', fld', v') when tbl' = tbl && fld' = fld && v' = v -> () + let expect_missing_tbl tbl f = + try + f () + with Db_exn.DBCache_NotFound("missing table", tbl', "") when tbl' = tbl -> () + + let expect_uniqueness_violation tbl fld v f = + try + f () + with Db_exn.Uniqueness_constraint_violation(tbl', fld', v') when tbl' = tbl && fld' = fld && v' = v -> () -let expect_missing_uuid tbl uuid f = - try - f () - with Db_exn.Read_missing_uuid(tbl', "", uuid') when tbl' = tbl && uuid' = uuid -> () - -let expect_missing_field name f = - try - f () - with Db_exn.DBCache_NotFound("missing field", name', "") when name' = name -> () - -let test_invalid_where_record fn_name fn = - Printf.printf "%s ...\n" fn_name; - expect_missing_tbl "Vm" - (fun () -> - let (_: string list) = fn { Db_cache_types.table = "Vm"; return = ""; where_field = ""; where_value = "" } in - failwith (Printf.sprintf "%s " fn_name) - ); - Printf.printf "%s \n" fn_name; - expect_missing_field "wibble" - (fun () -> - let (_: string list) = fn { Db_cache_types.table = "VM"; return = "wibble"; where_field = Escaping.escape_id [ "name"; "label" ]; where_value = name } in - failwith (Printf.sprintf "%s " fn_name) - ); - Printf.printf "%s \n" fn_name; - expect_missing_field "wibble" - (fun () -> - let (_: string list) = fn { Db_cache_types.table = "VM"; return = Escaping.escape_id [ "name"; "label" ]; where_field = "wibble"; where_value = "" } in - failwith (Printf.sprintf "%s " fn_name) - ) - - -let _ = - Printexc.record_backtrace true; - Arg.parse [ - "--connect-to", Arg.Set_string path, Printf.sprintf "connect to server on path (default %s)" !path; - ] (fun x -> Printf.fprintf stderr "Ignoring unknown parameter: %s\n%!" 
x) - "query a database server"; - - (* reference which we create *) - let valid_ref = "ref1" in - let valid_uuid = "uuid1" in - let invalid_ref = "foo" in - let invalid_uuid = "bar" in + let expect_missing_uuid tbl uuid f = + try + f () + with Db_exn.Read_missing_uuid(tbl', "", uuid') when tbl' = tbl && uuid' = uuid -> () + let expect_missing_field name f = + try + f () + with Db_exn.DBCache_NotFound("missing field", name', "") when name' = name -> () + + let test_invalid_where_record fn_name fn = + Printf.printf "%s ...\n" fn_name; + expect_missing_tbl "Vm" + (fun () -> + let (_: string list) = fn { Db_cache_types.table = "Vm"; return = ""; where_field = ""; where_value = "" } in + failwith (Printf.sprintf "%s " fn_name) + ); + Printf.printf "%s \n" fn_name; + expect_missing_field "wibble" + (fun () -> + let (_: string list) = fn { Db_cache_types.table = "VM"; return = "wibble"; where_field = Escaping.escape_id [ "name"; "label" ]; where_value = name } in + failwith (Printf.sprintf "%s " fn_name) + ); + Printf.printf "%s \n" fn_name; + expect_missing_field "wibble" + (fun () -> + let (_: string list) = fn { Db_cache_types.table = "VM"; return = Escaping.escape_id [ "name"; "label" ]; where_field = "wibble"; where_value = "" } in + failwith (Printf.sprintf "%s " fn_name) + ) + + (* Verify the ref_index contents are correct for a given [tblname] and [key] (uuid/ref) *) + let check_ref_index tblname key = match Ref_index.lookup key with + | None -> + (* We should fail to find the row *) + expect_missing_row tblname key + (fun () -> let (_: string) = Client.read_field tblname "uuid" key in ()); + expect_missing_uuid tblname key + (fun () -> let (_: string) = Client.db_get_by_uuid tblname key in ()) + | Some { Ref_index.name_label = name_label; uuid = uuid; _ref = _ref } -> + (* key should be either uuid or _ref *) + if key <> uuid && (key <> _ref) + then failwith (Printf.sprintf "check_ref_index %s key %s: got ref %s uuid %s" tblname key _ref uuid); + let real_ref = if Client.is_valid_ref key then key else Client.db_get_by_uuid tblname key in + let real_name_label = + try Some (Client.read_field tblname "name__label" real_ref) + with _ -> None in + if name_label <> real_name_label + then failwith (Printf.sprintf "check_ref_index %s key %s: ref_index name_label = %s; db has %s" tblname key (Opt.default "None" name_label) (Opt.default "None" real_name_label)) + + + let main in_process = + (* reference which we create *) + let valid_ref = "ref1" in + let valid_uuid = "uuid1" in + let invalid_ref = "foo" in + let invalid_uuid = "bar" in + let vbd_ref = "waz" in - let vbd_uuid = "whatever" in - - (* Before we begin, clear out any old state: *) - expect_missing_row "VM" valid_ref - (fun () -> - Client.delete_row "VM" valid_ref; - ); - expect_missing_row "VBD" vbd_ref - (fun () -> - Client.delete_row "VBD" vbd_ref; - ); - Printf.printf "Deleted stale state from previous test\n"; - - Printf.printf "get_table_from_ref \n"; - begin - match Client.get_table_from_ref invalid_ref with - | None -> Printf.printf "Reference '%s' has no associated table\n" invalid_ref - | Some t -> failwith (Printf.sprintf "Reference '%s' exists in table '%s'" invalid_ref t) - end; - Printf.printf "is_valid_ref \n"; - if Client.is_valid_ref invalid_ref then failwith "is_valid_ref = true"; - - Printf.printf "read_refs \n"; - let existing_refs = Client.read_refs "VM" in - Printf.printf "VM refs: [ %s ]\n" (String.concat "; " existing_refs); - Printf.printf "read_refs \n"; - expect_missing_tbl "Vm" - (fun () -> - let (_: string 
list) = Client.read_refs "Vm" in - () - ); - Printf.printf "delete_row \n"; - expect_missing_row "VM" invalid_ref - (fun () -> - Client.delete_row "VM" invalid_ref; - failwith "delete_row of a non-existent row silently succeeded" - ); - Printf.printf "create_row \n"; - Client.create_row "VM" (make_vm valid_ref valid_uuid) valid_ref; - Printf.printf "is_valid_ref \n"; - if not (Client.is_valid_ref valid_ref) - then failwith "is_valid_ref = false, after create_row"; - Printf.printf "get_table_from_ref \n"; - begin match Client.get_table_from_ref valid_ref with - | Some "VM" -> () - | Some t -> failwith "get_table_from_ref : invalid table" - | None -> failwith "get_table_from_ref : None" - end; - Printf.printf "read_refs includes \n"; - if not (List.mem valid_ref (Client.read_refs "VM")) - then failwith "read_refs did not include "; - - Printf.printf "create_row \n"; - expect_uniqueness_violation "VM" "_ref" valid_ref - (fun () -> - Client.create_row "VM" (make_vm valid_ref (valid_uuid ^ "unique")) valid_ref; - failwith "create_row " - ); - Printf.printf "create_row \n"; - expect_uniqueness_violation "VM" "uuid" valid_uuid - (fun () -> - Client.create_row "VM" (make_vm (valid_ref ^ "unique") valid_uuid) (valid_ref ^ "unique"); - failwith "create_row " - ); - Printf.printf "db_get_by_uuid \n"; - if Client.db_get_by_uuid "VM" valid_uuid <> valid_ref - then failwith "db_get_by_uuid "; - Printf.printf "db_get_by_uuid \n"; - expect_missing_uuid "VM" invalid_uuid - (fun () -> - let (_: string) = Client.db_get_by_uuid "VM" invalid_uuid in - failwith "db_get_by_uuid " - ); - Printf.printf "get_by_name_label \n"; - if Client.db_get_by_name_label "VM" invalid_name <> [] - then failwith "db_get_by_name_label "; - - Printf.printf "get_by_name_label \n"; - if Client.db_get_by_name_label "VM" name <> [ valid_ref ] - then failwith "db_get_by_name_label "; - - Printf.printf "read_field \n"; - if Client.read_field "VM" "name__label" valid_ref <> name - then failwith "read_field : invalid name"; - Printf.printf "read_field \n"; - expect_missing_row "VM" invalid_ref - (fun () -> - let (_: string) = Client.read_field "VM" "name__label" invalid_ref in - failwith "read_field " - ); - Printf.printf "read_field \n"; - expect_missing_field "name_label" - (fun () -> - let (_: string) = Client.read_field "VM" "name_label" valid_ref in - failwith "read_field " - ); - Printf.printf "read_field \n"; - expect_missing_row "VM" invalid_ref - (fun () -> - let (_: string) = Client.read_field "VM" "name_label" invalid_ref in - failwith "read_field " - ); - Printf.printf "read_field_where \n"; - let where_name_label = - { Db_cache_types.table = "VM"; return = Escaping.escape_id(["name"; "label"]); where_field="uuid"; where_value = valid_uuid } in - let xs = Client.read_field_where where_name_label in - if not (List.mem name xs) - then failwith "read_field_where "; - test_invalid_where_record "read_field_where" Client.read_field_where; - - let xs = Client.read_set_ref where_name_label in - if not (List.mem name xs) - then failwith "read_set_ref "; - test_invalid_where_record "read_set_ref" Client.read_set_ref; - - Printf.printf "write_field \n"; - expect_missing_tbl "Vm" - (fun () -> - let (_: unit) = Client.write_field "Vm" "" "" "" in - failwith "write_field " - ); - Printf.printf "write_field \n"; - expect_missing_row "VM" invalid_ref - (fun () -> - let (_: unit) = Client.write_field "VM" invalid_ref "" "" in - failwith "write_field " - ); - Printf.printf "write_field \n"; - expect_missing_field "wibble" - (fun () -> - let (_: 
unit) = Client.write_field "VM" valid_ref "wibble" "" in - failwith "write_field " - ); - Printf.printf "write_field \n"; - let (_: unit) = Client.write_field "VM" valid_ref (Escaping.escape_id ["name"; "description"]) "description" in - - Printf.printf "read_record \n"; - expect_missing_tbl "Vm" - (fun () -> - let _ = Client.read_record "Vm" invalid_ref in - failwith "read_record " - ); - Printf.printf "read_record \n"; - expect_missing_row "VM" invalid_ref - (fun () -> - let _ = Client.read_record "VM" invalid_ref in - failwith "read_record " - ); - Printf.printf "read_record \n"; - let fv_list, fvs_list = Client.read_record "VM" valid_ref in - if not(List.mem_assoc (Escaping.escape_id [ "name"; "label" ]) fv_list) - then failwith "read_record 1"; - if List.assoc "VBDs" fvs_list <> [] - then failwith "read_record 2"; - Printf.printf "read_record foreign key\n"; - Client.create_row "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref; - let fv_list, fvs_list = Client.read_record "VM" valid_ref in - if List.assoc "VBDs" fvs_list <> [ vbd_ref ] - then failwith "read_record 3"; - Printf.printf "read_record deleted foreign key\n"; - Client.delete_row "VBD" vbd_ref; - let fv_list, fvs_list = Client.read_record "VM" valid_ref in - if List.assoc "VBDs" fvs_list <> [] - then failwith "read_record 4"; - Printf.printf "read_record overwritten foreign key\n"; - Client.create_row "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref; - let fv_list, fvs_list = Client.read_record "VM" valid_ref in - if List.assoc "VBDs" fvs_list = [] - then failwith "read_record 5"; - Client.write_field "VBD" vbd_ref (Escaping.escape_id [ "VM" ]) "overwritten"; - let fv_list, fvs_list = Client.read_record "VM" valid_ref in - if List.assoc "VBDs" fvs_list <> [] - then failwith "read_record 6"; - - expect_missing_tbl "Vm" - (fun () -> - let _ = Client.read_records_where "Vm" Db_filter_types.True in - () - ); - let xs = Client.read_records_where "VM" Db_filter_types.True in - if List.length xs <> 1 - then failwith "read_records_where 2"; - let xs = Client.read_records_where "VM" Db_filter_types.False in - if xs <> [] - then failwith "read_records_where 3"; - - expect_missing_tbl "Vm" - (fun () -> - let xs = Client.find_refs_with_filter "Vm" Db_filter_types.True in - failwith "find_refs_with_filter "; + let vbd_uuid = "whatever" in + + (* Before we begin, clear out any old state: *) + expect_missing_row "VM" valid_ref + (fun () -> + Client.delete_row "VM" valid_ref; ); - let xs = Client.find_refs_with_filter "VM" Db_filter_types.True in - if List.length xs <> 1 - then failwith "find_refs_with_filter 1"; - let xs = Client.find_refs_with_filter "VM" Db_filter_types.False in - if xs <> [] - then failwith "find_refs_with_filter 2"; + if in_process then check_ref_index "VM" valid_ref; - expect_missing_tbl "Vm" - (fun () -> - Client.process_structured_field ("","") "Vm" "wibble" invalid_ref Db_cache_types.AddSet; - failwith "process_structure_field " - ); - expect_missing_field "wibble" - (fun () -> - Client.process_structured_field ("","") "VM" "wibble" valid_ref Db_cache_types.AddSet; - failwith "process_structure_field " - ); - expect_missing_row "VM" invalid_ref + expect_missing_row "VBD" vbd_ref (fun () -> - Client.process_structured_field ("","") "VM" (Escaping.escape_id ["name"; "label"]) invalid_ref Db_cache_types.AddSet; - failwith "process_structure_field " + Client.delete_row "VBD" vbd_ref; ); - Client.process_structured_field ("foo", "") "VM" "tags" valid_ref Db_cache_types.AddSet; - if Client.read_field "VM" "tags" 
valid_ref <> "('foo')" - then failwith "process_structure_field expected ('foo')"; - Client.process_structured_field ("foo", "") "VM" "tags" valid_ref Db_cache_types.AddSet; - if Client.read_field "VM" "tags" valid_ref <> "('foo')" - then failwith "process_structure_field expected ('foo') 2"; - Client.process_structured_field ("foo", "bar") "VM" "other_config" valid_ref Db_cache_types.AddMap; - - if Client.read_field "VM" "other_config" valid_ref <> "(('foo' 'bar'))" - then failwith "process_structure_field expected (('foo' 'bar')) 3"; - - begin - try - Client.process_structured_field ("foo", "bar") "VM" "other_config" valid_ref Db_cache_types.AddMap; - with Db_exn.Duplicate_key("VM", "other_config", r', "foo") when r' = valid_ref -> () - end; - if Client.read_field "VM" "other_config" valid_ref <> "(('foo' 'bar'))" - then failwith "process_structure_field expected (('foo' 'bar')) 4"; + if in_process then check_ref_index "VBD" vbd_ref; + + Printf.printf "Deleted stale state from previous test\n"; + + Printf.printf "get_table_from_ref \n"; + begin + match Client.get_table_from_ref invalid_ref with + | None -> Printf.printf "Reference '%s' has no associated table\n" invalid_ref + | Some t -> failwith (Printf.sprintf "Reference '%s' exists in table '%s'" invalid_ref t) + end; + Printf.printf "is_valid_ref \n"; + if Client.is_valid_ref invalid_ref then failwith "is_valid_ref = true"; + + Printf.printf "read_refs \n"; + let existing_refs = Client.read_refs "VM" in + Printf.printf "VM refs: [ %s ]\n" (String.concat "; " existing_refs); + Printf.printf "read_refs \n"; + expect_missing_tbl "Vm" + (fun () -> + let (_: string list) = Client.read_refs "Vm" in + () + ); + Printf.printf "delete_row \n"; + expect_missing_row "VM" invalid_ref + (fun () -> + Client.delete_row "VM" invalid_ref; + failwith "delete_row of a non-existent row silently succeeded" + ); + Printf.printf "create_row \n"; + expect_missing_field "name__label" + (fun () -> + let broken_vm = List.filter (fun (k, _) -> k <> "name__label") (make_vm valid_ref valid_uuid) in + Client.create_row "VM" broken_vm valid_ref; + failwith "create_row " + ); + Printf.printf "create_row \n"; + Client.create_row "VM" (make_vm valid_ref valid_uuid) valid_ref; + if in_process then check_ref_index "VM" valid_ref; + Printf.printf "is_valid_ref \n"; + if not (Client.is_valid_ref valid_ref) + then failwith "is_valid_ref = false, after create_row"; + Printf.printf "get_table_from_ref \n"; + begin match Client.get_table_from_ref valid_ref with + | Some "VM" -> () + | Some t -> failwith "get_table_from_ref : invalid table" + | None -> failwith "get_table_from_ref : None" + end; + Printf.printf "read_refs includes \n"; + if not (List.mem valid_ref (Client.read_refs "VM")) + then failwith "read_refs did not include "; + + Printf.printf "create_row \n"; + expect_uniqueness_violation "VM" "_ref" valid_ref + (fun () -> + Client.create_row "VM" (make_vm valid_ref (valid_uuid ^ "unique")) valid_ref; + failwith "create_row " + ); + Printf.printf "create_row \n"; + expect_uniqueness_violation "VM" "uuid" valid_uuid + (fun () -> + Client.create_row "VM" (make_vm (valid_ref ^ "unique") valid_uuid) (valid_ref ^ "unique"); + failwith "create_row " + ); + Printf.printf "db_get_by_uuid \n"; + let r = Client.db_get_by_uuid "VM" valid_uuid in + if r <> valid_ref + then failwith (Printf.sprintf "db_get_by_uuid : got %s; expected %s" r valid_ref); + Printf.printf "db_get_by_uuid \n"; + expect_missing_uuid "VM" invalid_uuid + (fun () -> + let (_: string) = Client.db_get_by_uuid 
"VM" invalid_uuid in + failwith "db_get_by_uuid " + ); + Printf.printf "get_by_name_label \n"; + if Client.db_get_by_name_label "VM" invalid_name <> [] + then failwith "db_get_by_name_label "; + + Printf.printf "get_by_name_label \n"; + if Client.db_get_by_name_label "VM" name <> [ valid_ref ] + then failwith "db_get_by_name_label "; + + Printf.printf "read_field \n"; + if Client.read_field "VM" "name__label" valid_ref <> name + then failwith "read_field : invalid name"; + + Printf.printf "read_field \n"; + if Client.read_field "VM" "protection_policy" valid_ref <> "OpaqueRef:NULL" + then failwith "read_field : invalid protection_policy"; + + Printf.printf "read_field \n"; + expect_missing_row "VM" invalid_ref + (fun () -> + let (_: string) = Client.read_field "VM" "name__label" invalid_ref in + failwith "read_field " + ); + Printf.printf "read_field \n"; + expect_missing_field "name_label" + (fun () -> + let (_: string) = Client.read_field "VM" "name_label" valid_ref in + failwith "read_field " + ); + Printf.printf "read_field \n"; + expect_missing_row "VM" invalid_ref + (fun () -> + let (_: string) = Client.read_field "VM" "name_label" invalid_ref in + failwith "read_field " + ); + Printf.printf "read_field_where \n"; + let where_name_label = + { Db_cache_types.table = "VM"; return = Escaping.escape_id(["name"; "label"]); where_field="uuid"; where_value = valid_uuid } in + let xs = Client.read_field_where where_name_label in + if not (List.mem name xs) + then failwith "read_field_where "; + test_invalid_where_record "read_field_where" Client.read_field_where; + + let xs = Client.read_set_ref where_name_label in + if not (List.mem name xs) + then failwith "read_set_ref "; + test_invalid_where_record "read_set_ref" Client.read_set_ref; + + Printf.printf "write_field \n"; + expect_missing_tbl "Vm" + (fun () -> + let (_: unit) = Client.write_field "Vm" "" "" "" in + failwith "write_field " + ); + Printf.printf "write_field \n"; + expect_missing_row "VM" invalid_ref + (fun () -> + let (_: unit) = Client.write_field "VM" invalid_ref "" "" in + failwith "write_field " + ); + Printf.printf "write_field \n"; + expect_missing_field "wibble" + (fun () -> + let (_: unit) = Client.write_field "VM" valid_ref "wibble" "" in + failwith "write_field " + ); + Printf.printf "write_field \n"; + let (_: unit) = Client.write_field "VM" valid_ref (Escaping.escape_id ["name"; "description"]) "description" in + if in_process then check_ref_index "VM" valid_ref; + Printf.printf "write_field - invalidating ref_index\n"; + let (_: unit) = Client.write_field "VM" valid_ref (Escaping.escape_id ["name"; "label"]) "newlabel" in + if in_process then check_ref_index "VM" valid_ref; + + Printf.printf "read_record \n"; + expect_missing_tbl "Vm" + (fun () -> + let _ = Client.read_record "Vm" invalid_ref in + failwith "read_record " + ); + Printf.printf "read_record \n"; + expect_missing_row "VM" invalid_ref + (fun () -> + let _ = Client.read_record "VM" invalid_ref in + failwith "read_record " + ); + Printf.printf "read_record \n"; + let fv_list, fvs_list = Client.read_record "VM" valid_ref in + if not(List.mem_assoc (Escaping.escape_id [ "name"; "label" ]) fv_list) + then failwith "read_record 1"; + if List.assoc "VBDs" fvs_list <> [] + then failwith "read_record 2"; + Printf.printf "read_record foreign key\n"; + Client.create_row "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref; + let fv_list, fvs_list = Client.read_record "VM" valid_ref in + if List.assoc "VBDs" fvs_list <> [ vbd_ref ] + then failwith "read_record 3"; 
+		(* VM.VBDs is a set-ref computed from the VM field of each VBD row,
+		   rather than being stored directly: deleting a VBD, or overwriting
+		   its VM field, should therefore empty the set again. *)
+		Printf.printf "read_record deleted foreign key\n";
+		Client.delete_row "VBD" vbd_ref;
+		let fv_list, fvs_list = Client.read_record "VM" valid_ref in
+		if List.assoc "VBDs" fvs_list <> []
+		then failwith "read_record 4";
+		Printf.printf "read_record overwritten foreign key\n";
+		Client.create_row "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref;
+		let fv_list, fvs_list = Client.read_record "VM" valid_ref in
+		if List.assoc "VBDs" fvs_list = []
+		then failwith "read_record 5";
+		Client.write_field "VBD" vbd_ref (Escaping.escape_id [ "VM" ]) "overwritten";
+		let fv_list, fvs_list = Client.read_record "VM" valid_ref in
+		if List.assoc "VBDs" fvs_list <> []
+		then failwith "read_record 6";
+
+		expect_missing_tbl "Vm"
+			(fun () ->
+				let _ = Client.read_records_where "Vm" Db_filter_types.True in
+				()
+			);
+		let xs = Client.read_records_where "VM" Db_filter_types.True in
+		if List.length xs <> 1
+		then failwith "read_records_where 2";
+		let xs = Client.read_records_where "VM" Db_filter_types.False in
+		if xs <> []
+		then failwith "read_records_where 3";
+
+		expect_missing_tbl "Vm"
+			(fun () ->
+				let xs = Client.find_refs_with_filter "Vm" Db_filter_types.True in
+				failwith "find_refs_with_filter ";
+			);
+		let xs = Client.find_refs_with_filter "VM" Db_filter_types.True in
+		if List.length xs <> 1
+		then failwith "find_refs_with_filter 1";
+		let xs = Client.find_refs_with_filter "VM" Db_filter_types.False in
+		if xs <> []
+		then failwith "find_refs_with_filter 2";
+
+		expect_missing_tbl "Vm"
+			(fun () ->
+				Client.process_structured_field ("","") "Vm" "wibble" invalid_ref Db_cache_types.AddSet;
+				failwith "process_structured_field "
+			);
+		expect_missing_field "wibble"
+			(fun () ->
+				Client.process_structured_field ("","") "VM" "wibble" valid_ref Db_cache_types.AddSet;
+				failwith "process_structured_field "
+			);
+		expect_missing_row "VM" invalid_ref
+			(fun () ->
+				Client.process_structured_field ("","") "VM" (Escaping.escape_id ["name"; "label"]) invalid_ref Db_cache_types.AddSet;
+				failwith "process_structured_field "
+			);
+		Client.process_structured_field ("foo", "") "VM" "tags" valid_ref Db_cache_types.AddSet;
+		if Client.read_field "VM" "tags" valid_ref <> "('foo')"
+		then failwith "process_structured_field expected ('foo')";
+		Client.process_structured_field ("foo", "") "VM" "tags" valid_ref Db_cache_types.AddSet;
+		if Client.read_field "VM" "tags" valid_ref <> "('foo')"
+		then failwith "process_structured_field expected ('foo') 2";
+		Client.process_structured_field ("foo", "bar") "VM" "other_config" valid_ref Db_cache_types.AddMap;
+
+		if Client.read_field "VM" "other_config" valid_ref <> "(('foo' 'bar'))"
+		then failwith "process_structured_field expected (('foo' 'bar')) 3";
+
+		begin
+			try
+				Client.process_structured_field ("foo", "bar") "VM" "other_config" valid_ref Db_cache_types.AddMap;
+			with Db_exn.Duplicate_key("VM", "other_config", r', "foo") when r' = valid_ref -> ()
+		end;
+		if Client.read_field "VM" "other_config" valid_ref <> "(('foo' 'bar'))"
+		then failwith "process_structured_field expected (('foo' 'bar')) 4";
+
+		(* Check that non-persistent fields are filled with an empty value *)
+
+
+		(* Performance test *)
+		if in_process then begin
+			let time n f =
+				let start = Unix.gettimeofday () in
+				for i = 0 to n do
+					f i
+				done;
+				let total = Unix.gettimeofday () -. start in
+				float_of_int n /. total in
+
+			let n = 5000 in
+
+			let rpc_time = time n (fun _ ->
+				let (_: bool) = Client.is_valid_ref valid_ref in ()) in
+
+			Printf.printf "%.2f primitive RPC calls/sec\n" rpc_time;
+
+			(* Delete stuff left over from the previous run *)
+			let delete_time = time n
+				(fun i ->
+					let rf = Printf.sprintf "%s:%d" vbd_ref i in
+					try
+						Client.delete_row "VBD" rf
+					with _ -> ()
+				) in
+			Printf.printf "Deleted %d VBD records, %.2f calls/sec\n%!" n delete_time;
+
+			expect_missing_row "VBD" vbd_ref
+				(fun () ->
+					Client.delete_row "VBD" vbd_ref;
+				);
+
+			(* Create lots of VBDs referencing no VM *)
+			let create_time = time n
+				(fun i ->
+					let rf = Printf.sprintf "%s:%d" vbd_ref i in
+					let uuid = Printf.sprintf "%s:%d" vbd_uuid i in
+					Client.create_row "VBD" (make_vbd invalid_ref rf uuid) rf;
+				) in
+			Printf.printf "Created %d VBD records, %.2f calls/sec\n%!" n create_time;
+
+			let m = 300000 in (* multiple of 3 *)
+
+			(* Time a benign sequence: VBD create_row/delete_row pairs first, then VM read_record *)
+			let benign_time = time m
+				(fun i ->
+					if i < (m / 3 * 2) then begin
+						if i mod 2 = 0
+						then Client.create_row "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref
+						else Client.delete_row "VBD" vbd_ref
+					end else
+						let fv_list, fvs_list = Client.read_record "VM" valid_ref in
+						()
+				) in
+			Printf.printf "good sequence: %.2f calls/sec\n%!" benign_time;
+
+			let malign_time = time m
+				(fun i ->
+					match i mod 3 with
+					| 0 -> Client.create_row "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref
+					| 1 -> Client.delete_row "VBD" vbd_ref
+					| 2 -> let fv_list, fvs_list = Client.read_record "VM" valid_ref in
+						()
+				) in
+			Printf.printf "bad sequence: %.2f calls/sec\n%!" malign_time;
+		end
+end

-	(* Performance test *)
-	let start = Unix.gettimeofday () in
-	let n = 10000 in
-	for i = 0 to n do
-		let (_: bool) = Client.is_valid_ref valid_ref in
-		()
-	done;
-	let total = Unix.gettimeofday () -. start in
-	Printf.printf "%.2f RPC calls/sec\n" (float_of_int n /. total)
diff --git a/ocaml/database/database_test.mli b/ocaml/database/database_test.mli
new file mode 100644
index 00000000..d81d5461
--- /dev/null
+++ b/ocaml/database/database_test.mli
@@ -0,0 +1,17 @@
+(*
+ * Copyright (C) 2010 Citrix Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+module Tests : functor (Client: Db_interface.DB_ACCESS) -> sig
+	val main: bool -> unit
+end
diff --git a/ocaml/database/database_test_main.ml b/ocaml/database/database_test_main.ml
new file mode 100644
index 00000000..d900957f
--- /dev/null
+++ b/ocaml/database/database_test_main.ml
@@ -0,0 +1,54 @@
+(*
+ * Copyright (C) 2010 Citrix Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *)
+
+open Database_test
+
+let path = ref "./database"
+
+let rpc_common url content_type request =
+	let version = "1.1" in
+	let content_length = String.length request in
+	let headers = [
+		Printf.sprintf "POST %s HTTP/%s" url version;
+		Printf.sprintf "User-Agent: xapi/%s" Xapi_globs.api_version_string;
+		Printf.sprintf "Content-Type: %s" content_type;
+		Printf.sprintf "Content-length: %d" content_length;
+	] in
+	Xmlrpcclient.do_http_rpc "" 0 headers ~unixsock:(Some (!path)) request
+		(fun content_length _ fd ->
+			let buffer = String.make content_length '\000' in
+			Unixext.really_read fd buffer 0 content_length;
+			buffer)
+
+module Client_v1 = Db_rpc_client_v1.Make(struct
+	let initialise () = ()
+	let rpc request = rpc_common "/post_remote_db_access" "text/xml" request
+end)
+
+module Client_v2 = Db_rpc_client_v2.Make(struct
+	let initialise () = ()
+	let rpc request = rpc_common "/post_remote_db_access_v2" "text/json" request
+end)
+
+module T = Tests(Client_v2)
+
+
+let _ =
+	Printexc.record_backtrace true;
+	Arg.parse [
+		"--connect-to", Arg.Set_string path, Printf.sprintf "connect to server on path (default %s)" !path;
+	] (fun x -> Printf.fprintf stderr "Ignoring unknown parameter: %s\n%!" x)
+		"query a database server";
+	T.main false (* in_process = false: we drive an external server over the socket *)

diff --git a/ocaml/database/db_action_helper.ml b/ocaml/database/db_action_helper.ml
index 026db4ed..34b0e5da 100644
--- a/ocaml/database/db_action_helper.ml
+++ b/ocaml/database/db_action_helper.ml
@@ -12,12 +12,6 @@
 * GNU Lesser General Public License for more details.
*) -(** Table column name which contains the reference *) -let reference = Escaping.reference - -(** Table column name which contains the uuid *) -let uuid = "uuid" - (* General DB utils *) let __callback : ((?snapshot: XMLRPC.xmlrpc -> string -> string -> string -> unit) option ref) = ref None diff --git a/ocaml/database/db_backend.ml b/ocaml/database/db_backend.ml index a8931409..ea71000e 100644 --- a/ocaml/database/db_backend.ml +++ b/ocaml/database/db_backend.ml @@ -13,159 +13,27 @@ *) open Db_exn open Db_lock -open Db_action_helper open Db_cache_types +open Pervasiveext module D = Debug.Debugger(struct let name = "sql" end) open D -(* delete a db file and its generation count, best effort *) -let try_and_delete_db_file file = - (try Unix.unlink file with _ -> ()); - (try Unix.unlink (file^".generation") with _ -> ()) - -(* --------------------- Useful datamodel constructions: *) - -(* Return a list of all the SQL table names *) -(* ---- NOTE THIS DEPENDENCY ACTUALLY LINKS IDL DATAMODEL INTO BINARY ---- *) -let api_objs = Dm_api.objects_of_api Datamodel.all_api -let api_relations = Dm_api.relations_of_api Datamodel.all_api -let db_table_names = - List.map (fun x->Escaping.escape_obj x.Datamodel_types.name) api_objs - -(* Build a table that maps table names onto their persistency options *) -let table_persist_options = Hashtbl.create 20 -let _ = - begin - let objs = Dm_api.objects_of_api Datamodel.all_api in - List.iter (fun x->Hashtbl.replace table_persist_options (Escaping.escape_obj x).Datamodel_types.name x.Datamodel_types.persist) objs - end - -let this_table_persists tblname = (Hashtbl.find table_persist_options tblname)=Datamodel_types.PersistEverything - -(* Flatten fields in heirarchical namespace (as they are in IDL) into a flat list *) -let rec flatten_fields fs acc = - match fs with - [] -> acc - | (Datamodel_types.Field f)::fs -> flatten_fields fs (f::acc) - | (Datamodel_types.Namespace (_,internal_fs))::fs -> flatten_fields fs (flatten_fields internal_fs acc) - -(* Build a table that maps table/field names onto field persistency options *) -let field_persist_options : (string (* table name *) * string (* field name *), bool) Hashtbl.t = Hashtbl.create 20 -let _ = - begin - let objs = Dm_api.objects_of_api Datamodel.all_api in - List.iter - (fun obj-> - let fields = flatten_fields obj.Datamodel_types.contents [] in - List.iter (fun f->Hashtbl.replace field_persist_options - ((Escaping.escape_obj obj).Datamodel_types.name (* table name *), - (Escaping.escape_id f.Datamodel_types.full_name) (* field name *)) - f.Datamodel_types.field_persist) fields - ) - objs - end - -let persist_field_changes tblname fldname = - Hashtbl.find field_persist_options (tblname,fldname) - -(* --------------------- Some field-name constants *) - -(** Table column name which contains the reference *) -let reference_fname = Escaping.reference - -(** Table column name which contains the name_label *) -let name_label_fname = Escaping.escape_id ["name";"label"] - -(** Table column name which contains the uuid *) -let uuid_fname = "uuid" - (* --------------------- Constants/data-structures for storing db contents *) let db_FLUSH_TIMER=2.0 (* flush db write buffer every db_FLUSH_TIMER seconds *) let display_sql_writelog_val = ref true (* compute/write sql-writelog debug string *) (* The cache itself: *) -let cache : Db_cache_types.cache = create_empty_cache () - -(* Keep track of all references, and which class a reference belongs to: *) -let ref_table_map : (string,string) Hashtbl.t = Hashtbl.create 100 
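+(* The single piece of global mutable state left in the database layer: one
+   reference to the master's database. All access goes through get_database
+   and update_database below. *)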
+let database : Db_cache_types.Database.t ref = ref (Db_cache_types.Database.make (Schema.of_datamodel ())) (* --------------------- Util functions on db datastructures *) -(* These fns are called internally, and always in locked context, so don't need to take lock again *) -let add_ref_to_table_map objref tblname = - Hashtbl.replace ref_table_map objref tblname -let remove_ref_from_table_map objref = - Hashtbl.remove ref_table_map objref - -(* Given a (possibly partial) new row to write, check if any db - constraints are violated *) -let check_unique_table_constraints tblname newrow = - let check_unique_constraint tblname fname opt_value = - match opt_value with - None -> () - | Some v -> - if List.mem v (get_column cache tblname fname) then begin - error "Uniqueness constraint violation: table %s field %s value %s" tblname fname v; - (* Note: it's very important that the Uniqueness_constraint_violation exception is thrown here, since - this is what is caught/marshalled over the wire in the remote case.. Do not be tempted to make this - an API exception! :) *) - raise (Uniqueness_constraint_violation ( tblname, fname, v )) - end in - let new_uuid = try Some (lookup_field_in_row newrow uuid_fname) with _ -> None in - let new_ref = try Some (lookup_field_in_row newrow reference_fname) with _ -> None in - check_unique_constraint tblname uuid_fname new_uuid; - check_unique_constraint tblname reference_fname new_ref - - -(* --------------------- Util functions to support incremental index generation *) - -(* Incrementally build and cache indexes *) -type index = (string, string list) Hashtbl.t - (* index takes a tbl-name, where-field, return-fld and returns an index - mapping where-val onto a return-val list *) -let indexes : (string*string*string, index) Hashtbl.t = Hashtbl.create 50 -let invalidate_indexes tbl = - Hashtbl.iter (fun (index_tbl,w,r) _ -> - if tbl=index_tbl then Hashtbl.remove indexes (index_tbl,w,r)) indexes - -let invalidate_indexes_for_specific_field tbl fldname = - Hashtbl.iter (fun (index_tbl,where,r) _ -> - if tbl=index_tbl && where=fldname then - Hashtbl.remove indexes (index_tbl,where,r)) indexes - -let add_to_index i (k,v) = - let existing_results_in_index = try Hashtbl.find i k with _ -> [] in - Hashtbl.replace i k (v::existing_results_in_index) - -(* -------------------- Version upgrade support utils *) - -(* This function (which adds default values that are not already present to table rows) is used in both the create_row - call (where we fill in missing fields not supplied by an older vsn of the db protocol during pool-rolling-upgrade); - and in the state.db schema upgrade process (see db_upgrade.ml) *) - -(* check default values, filling in if they're not present: [for handling older vsns of the db protocol] *) -(* it's not the most efficient soln to just search in the association list, looking up default fields from datamodel - each time; but creates are rare and fields are few, so it's fine... *) -let add_default_kvs kvs tblname = - - (* given a default value from IDL, turn it into a string for db insertion. !!! This should be merged up with other - marshalling code at some point (although this requires some major refactoring cos that's all autogenerated and - not "dynamically typed".. *) - let gen_db_string_value f = - match f.Datamodel_types.default_value with - Some v -> Datamodel_values.to_db_string v - | None -> "" (* !!! 
Should never happen *) in - - let this_obj = List.find (fun obj-> (Escaping.escape_obj obj.Datamodel_types.name) = tblname) (Dm_api.objects_of_api Datamodel.all_api) in - let default_fields = List.filter (fun f -> f.Datamodel_types.default_value <> None) (flatten_fields this_obj.Datamodel_types.contents []) in - let default_values = List.map gen_db_string_value default_fields in - let default_field_names = List.map (fun f -> Escaping.escape_id f.Datamodel_types.full_name) default_fields in - let all_default_kvs = List.combine default_field_names default_values in - (* only add kv pairs for keys that have not already been supplied to create_row call (in kvs argument) *) - let keys_supplied = List.map fst kvs in - kvs @ (List.filter (fun (k,_) -> not (List.mem k keys_supplied)) all_default_kvs) +let update_database f = + database := f (!database) + +let get_database () = !database + (* !!! Right now this is called at cache population time. It would probably be preferable to call it on flush time instead, so we don't waste writes storing non-persistent field values on disk.. At the moment there's not much to worry about, since there are @@ -173,56 +41,68 @@ let add_default_kvs kvs tblname = (* non-persistent fields will have been flushed to disk anyway [since non-persistent just means dont trigger a flush if I change]. Hence we blank non-persistent fields with a suitable empty value, depending on their type *) -let blow_away_non_persistent_fields() = - (* for each table, go through and blow away any non-persistent fields *) - let remove_non_persistent_field_values_from_tbl tblname = - let tbl = lookup_table_in_cache cache tblname in - let rows = get_rowlist tbl in - let this_obj = List.find (fun obj-> (Escaping.escape_obj obj.Datamodel_types.name) = tblname) (Dm_api.objects_of_api Datamodel.all_api) in - let non_persist_fields = List.filter (fun f -> not f.Datamodel_types.field_persist) (flatten_fields this_obj.Datamodel_types.contents []) in - let non_persist_fields_and_types = List.map (fun f -> f.Datamodel_types.ty, f) non_persist_fields in - (* if this table doesn't have any non persistent fields then there's nothing to do... *) - if non_persist_fields <> [] then - begin - let process_row r = - List.iter - (fun (ftype,f) -> - set_field_in_row r (Escaping.escape_id f.Datamodel_types.full_name) (Datamodel_values.gen_empty_db_val ftype)) - non_persist_fields_and_types in - List.iter process_row rows - end in - List.iter remove_non_persistent_field_values_from_tbl (List.map (fun x->Escaping.escape_obj x.Datamodel_types.name) api_objs) +let blow_away_non_persistent_fields (schema: Schema.t) db = + Printf.printf "blow_away\n%!"; + (* Generate a new row given a table schema *) + let row schema row : Row.t = + Row.fold + (fun name v acc -> + try + let col = Schema.Table.find name schema in + let v' = if col.Schema.Column.persistent then v else col.Schema.Column.empty in + Row.add name v' acc + with Not_found -> + Printf.printf "Skipping unknown column: %s\n%!" 
name; + acc) row Row.empty in + (* Generate a new table *) + let table tblname tbl : Table.t = + let schema = Schema.Database.find tblname schema.Schema.database in + Table.fold + (fun objref r acc -> + let r = row schema r in + Table.add objref r acc) tbl Table.empty in + Database.update + (fun ts -> + TableSet.fold + (fun tblname tbl acc -> + let tbl' = table tblname tbl in + TableSet.add tblname tbl' acc) ts TableSet.empty) + db (* after restoring from backup, we take the master's host record and make it reflect us *) -let post_restore_hook manifest = +let post_restore_hook db = debug "Executing post_restore_hook"; let my_installation_uuid = Xapi_inventory.lookup Xapi_inventory._installation_uuid in let my_control_uuid = Xapi_inventory.lookup Xapi_inventory._control_domain_uuid in let not_found = "" in + (* Look up the pool master: *) - let pools = lookup_table_in_cache cache Db_names.pool in - let master = fold_over_rows (fun _ref r acc -> lookup_field_in_row r Db_names.master) pools not_found in - if master = not_found - then debug "No master record to update" - else begin - - let mhr = find_row cache Db_names.host master in - set_field_in_row mhr uuid_fname my_installation_uuid; - let _ref = lookup_field_in_row mhr reference_fname in - Ref_index.update_uuid _ref my_installation_uuid - end; - - (* Look up the pool master's control domain: *) - let vms = lookup_table_in_cache cache Db_names.vm in - let master_dom0 = fold_over_rows (fun _ref r acc -> if lookup_field_in_row r Db_names.resident_on = master && (lookup_field_in_row r Db_names.is_control_domain = "true") then _ref else acc) vms not_found in - if master_dom0 = not_found - then debug "No master control domain record to update" - else begin - - let mdr = find_row cache Db_names.vm master_dom0 in - set_field_in_row mdr uuid_fname my_control_uuid; - let _ref = lookup_field_in_row mdr reference_fname in - Ref_index.update_uuid _ref my_control_uuid - end; - debug "post_restore_hook executed" + let pools = TableSet.find Db_names.pool (Database.tableset db) in + let master = Table.fold (fun _ref r acc -> Row.find Db_names.master r) pools not_found in + + let update_master_host db : Database.t = + if master = not_found then begin + debug "No master record to update"; + db + end else begin + set_field_in_row Db_names.host master Db_names.uuid my_installation_uuid db + end in + + let update_master_dom0 db : Database.t = + (* Look up the pool master's control domain: *) + let vms = TableSet.find Db_names.vm (Database.tableset db) in + let master_dom0 = Table.fold (fun _ref r acc -> + if + Row.find Db_names.resident_on r = master && + (Row.find Db_names.is_control_domain r = "true") + then _ref else acc) vms not_found in + if master_dom0 = not_found then begin + debug "No master control domain record to update"; + db + end else begin + set_field_in_row Db_names.vm master_dom0 Db_names.uuid my_control_uuid db + end in + + (update_master_host ++ update_master_dom0) db + diff --git a/ocaml/database/db_cache_impl.ml b/ocaml/database/db_cache_impl.ml index a21c6edf..0f83a52d 100644 --- a/ocaml/database/db_cache_impl.ml +++ b/ocaml/database/db_cache_impl.ml @@ -16,24 +16,24 @@ open Db_exn open Db_lock +open Pervasiveext module D = Debug.Debugger(struct let name = "sql" end) open D module W = Debug.Debugger(struct let name = "db_write" end) open Db_cache_types -open Db_action_helper open Db_backend -let context = Context.make "db_cache" - (* This fn is part of external interface, so need to take lock *) let get_table_from_ref objref = with_lock 
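(* Aside: post_restore_hook above expresses each fix-up as a pure
   Database.t -> Database.t step and chains them with the
   reverse-application combinator (++). A stdlib-only sketch; the step
   names here are hypothetical. *)
let ( ++ ) f g x = f (g x)
let fix_host db = db ^ ";host-fixed"
let fix_dom0 db = db ^ ";dom0-fixed"
let () = assert ((fix_host ++ fix_dom0) "db" = "db;dom0-fixed;host-fixed")
(* Note the order: (f ++ g) applies g first, so update_master_dom0 runs
   before update_master_host in the code above. *)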
(fun () -> - if Hashtbl.mem ref_table_map objref - then Some (Hashtbl.find ref_table_map objref) - else None) + let db = get_database () in + try + Some (Database.table_of_ref objref db) + with Not_found -> + None) let is_valid_ref objref = match (get_table_from_ref objref) with @@ -44,21 +44,10 @@ let is_valid_ref objref = let read_field tblname fldname objref = with_lock (fun () -> - let row = find_row cache tblname objref in - lookup_field_in_row row fldname) + let db = get_database () in + Row.find fldname (Table.find_exn tblname objref (TableSet.find tblname (Database.tableset db))) + ) -let table_of_kvs kvs = - let row = create_empty_row () in - List.iter (fun (k,v)-> set_field_in_row row k v) kvs; - row - -let save_in_redo_log context entry = - if Redo_log.is_enabled() then begin - Redo_log.write_delta (Db_cache_types.generation_of_cache Db_backend.cache) entry - (fun () -> (* the function which will be invoked if a database write is required instead of a delta *) - Backend_xml.flush_db_to_redo_log Db_backend.cache - ) - end (** Finds the longest XML-compatible UTF-8 prefix of the given *) (** string, by truncating the string at the first incompatible *) @@ -70,313 +59,162 @@ let ensure_utf8_xml string = if length > String.length prefix then warn "string truncated to: '%s'." prefix; prefix + (* Write field in cache *) let write_field tblname objref fldname newval = with_lock (fun () -> - (* if uuid or reference then check uniqueness constraints: *) - if fldname=uuid_fname then begin - check_unique_table_constraints tblname (table_of_kvs [(uuid_fname, newval)]); - Ref_index.update_uuid objref newval; - end else if fldname=reference_fname then - check_unique_table_constraints tblname (table_of_kvs [(reference_fname, newval)]) - else if fldname=name_label_fname then - Ref_index.update_name_label objref newval; - - let row = find_row cache tblname objref in - let current_val = lookup_field_in_row row fldname in - - let other_tbl_refs = Eventgen.follow_references tblname in - let other_tbl_refs_for_this_field = - List.filter (fun (_,fld) -> fld=fldname) other_tbl_refs in + let db = get_database () in + + let row = Table.find_exn tblname objref (TableSet.find tblname (Database.tableset db)) in + let current_val = Row.find fldname row in let newval = ensure_utf8_xml newval in if current_val<>newval then begin W.debug "write_field %s,%s: %s |-> %s" tblname objref fldname newval; - invalidate_indexes_for_specific_field tblname fldname; (* Update the field in the cache whether it's persistent or not *) - set_field_in_row row fldname newval; - - (* then only mark written row as dirty if we persist writes on this table && persist changes on this field *) - if (this_table_persists tblname) && (persist_field_changes tblname fldname) then - begin - (* Only flush to disk if persistent *) - Db_dirty.set_all_row_dirty_status objref Db_dirty.Modified; - Db_dirty.set_all_dirty_table_status tblname; - Db_cache_types.increment Db_backend.cache; - save_in_redo_log context (Redo_log.WriteField(tblname, objref, fldname, newval)) - end; - - let events_old_val = - if is_valid_ref current_val then - Eventgen.events_of_other_tbl_refs - (List.map (fun (tbl,fld) -> - (tbl, current_val, Eventgen.find_get_record tbl ~__context:context ~self:current_val)) other_tbl_refs_for_this_field) - else [] - in - - let events_new_val = - if is_valid_ref newval then - Eventgen.events_of_other_tbl_refs - (List.map (fun (tbl,fld) -> - (tbl, newval, Eventgen.find_get_record tbl ~__context:context ~self:newval)) 
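(* Aside: read_field above is a pure lookup through three nested immutable
   maps: table set -> table -> row. A stdlib-only model of the shape; the
   type names are illustrative. *)
module SMap = Map.Make (String)
type row = string SMap.t
type table = row SMap.t
type tableset = table SMap.t
let read_field (ts : tableset) tblname objref fldname =
  SMap.find fldname (SMap.find objref (SMap.find tblname ts))
let () =
  let ts =
    SMap.singleton "VM" (SMap.singleton "r1" (SMap.singleton "uuid" "u")) in
  assert (read_field ts "VM" "r1" "uuid" = "u")
(* Each find raises Not_found; the patch converts that into
   DBCache_NotFound at the Row / Table / TableSet module boundaries. *)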
other_tbl_refs_for_this_field) - else [] - in - - (* Generate event *) - let snapshot = Eventgen.find_get_record tblname ~__context:context ~self:objref in - let record = snapshot() in - List.iter (function - | tbl, ref, None -> - error "Failed to send MOD event for %s %s" tbl ref; - Printf.printf "Failed to send MOD event for %s %s\n%!" tbl ref; - | tbl, ref, Some s -> - events_notify ~snapshot:s tbl "mod" ref - ) events_old_val; - begin match record with - | None -> - error "Failed to send MOD event for %s %s" tblname objref; - Printf.printf "Failed to send MOD event for %s %s\n%!" tblname objref; - | Some record -> - events_notify ~snapshot:record tblname "mod" objref; - end; - List.iter (function - | tbl, ref, None -> - error "Failed to send MOD event for %s %s" tbl ref; - Printf.printf "Failed to send MOD event for %s %s\n%!" tbl ref; - | tbl, ref, Some s -> - events_notify ~snapshot:s tbl "mod" ref - ) events_new_val; + update_database (set_field_in_row tblname objref fldname newval); + + Database.notify (WriteField(tblname, objref, fldname, current_val, newval)) db; + + (* then only persist the change if the schema says so *) + if Schema.is_field_persistent (Database.schema db) tblname fldname + then update_database Database.increment; end) -(* Read specified field from tbl where where_field == where_value, using indexing *) +(* This function *should* only be used by db_actions code looking up Set(Ref _) fields: + if we detect another (illegal) use we log the problem and fall back to a slow scan *) let read_set_ref rcd = - with_lock - (fun () -> - (* See if index exists for this lookup, if not make index *) - let index = - try Hashtbl.find indexes (rcd.table, rcd.where_field, rcd.return) - with _ -> - begin - let tbl = lookup_table_in_cache cache rcd.table in - let rows = get_rowlist tbl in - let new_index = Hashtbl.create (List.length rows) in - let rec populate_index rows = - match rows with - [] -> () - | (r::rs) -> - let indexed_field_value = lookup_field_in_row r rcd.where_field in - let result_field_value = lookup_field_in_row r rcd.return in - add_to_index new_index (indexed_field_value, result_field_value); - populate_index rs in - populate_index rows; (* populate new index *) - Hashtbl.replace indexes (rcd.table, rcd.where_field, rcd.return) new_index; - new_index - end in - (* Lookup query in index *) - try Hashtbl.find index rcd.where_value with _ -> []) + let db = get_database () in + + (* The where_record should correspond to the 'one' end of a 'one to many' *) + let one_tbl = rcd.table in + let one_fld = rcd.where_field in + let rels = + try + Schema.one_to_many one_tbl (Database.schema db) + with Not_found -> + raise (Db_exn.DBCache_NotFound("missing table", one_tbl, "")) + in + (* This is an 'illegal' use if: *) + let illegal = rcd.return <> Db_names.ref || (List.filter (fun (a, _, _) -> a = one_fld) rels = []) in + if not illegal then begin + let _, many_tbl, many_fld = List.find (fun (a, _, _) -> a = one_fld) rels in + let objref = rcd.where_value in + let str = read_field many_tbl many_fld objref in + String_unmarshall_helper.set (fun x -> x) str + end else begin + error "Illegal read_set_ref query { table = %s; where_field = %s; where_value = %s; return = %s }; falling back to linear scan" rcd.table rcd.where_field rcd.where_value rcd.return; + Printf.printf "Illegal read_set_ref query { table = %s; where_field = %s; where_value = %s; return = %s }; falling back to linear scan\n%!" 
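(* Aside: the new write_field only updates the database when the value
   really changes, and only advances the generation counter when the
   schema marks the field persistent. A toy model of that decision over a
   one-field database record (the record type is an assumption of the
   sketch, not part of this patch): *)
type toy_db = { value : string; generation : int64 }
let write ~persistent newval db =
  if db.value = newval then db (* unchanged: no write, no event *)
  else
    let db = { db with value = newval } in
    if persistent then { db with generation = Int64.succ db.generation }
    else db
let () =
  let db = { value = "a"; generation = 0L } in
  assert ((write ~persistent:true "a" db).generation = 0L);
  assert ((write ~persistent:true "b" db).generation = 1L);
  assert ((write ~persistent:false "b" db).generation = 0L)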
rcd.table rcd.where_field rcd.where_value rcd.return; + with_lock + (fun () -> + let db = get_database () in + let tbl = TableSet.find rcd.table (Database.tableset db) in + Table.fold + (fun rf row acc -> + if Row.find rcd.where_field row = rcd.where_value + then Row.find rcd.return row :: acc else acc) + tbl [] + ) + end + + (* setrefs contain the relationships from tbl to other tables in the form: local-classname, local-fieldname, remote-classname, remote-fieldname. db_read_record reads row from tbl with reference==objref [returning (fieldname, fieldvalue) list]. and iterates through set-refs [returning (fieldname, ref list) list; where fieldname is the name of the Set Ref field in tbl; and ref list is the list of foreign keys from related table with remote-fieldname=objref] *) -let read_record tbl objref = +let read_record tblname objref = with_lock (fun ()-> - let row = find_row cache tbl objref (* !! need fields as well as values here !! *) in - let fvlist = fold_over_fields (fun k d env -> (k,d)::env) row [] in - let get_set_ref tbl fld objref = - read_set_ref {table=tbl; return=reference_fname; - where_field=fld; where_value=objref} in - - let look_up_related_table_and_field obj other full_name = - (* Set(Ref t) is actually stored in the table t *) - let this_end = obj.Datamodel_types.name, List.hd (full_name) in - (* XXX: relationships should store full names *) - let obj', fld' = Datamodel_utils.Relations.other_end_of Datamodel.all_api this_end in - (obj', fld') in - - (* find datamodel object that corresponds to this table *) - let obj = List.find (fun obj -> obj.Datamodel_types.name = tbl) api_objs in - (* read its fields *) - let obj_fields = Datamodel_utils.fields_of_obj obj in - - let rec set_refs ls = - match ls with - [] -> [] - | ({Datamodel_types.ty = Datamodel_types.Set(Datamodel_types.Ref clsname); full_name = full_name}::fs) -> - let obj', fld' = look_up_related_table_and_field obj clsname full_name in - (Escaping.escape_obj obj.Datamodel_types.name, (* local classname *) - Escaping.escape_id full_name, (* local field *) - Escaping.escape_obj obj', (* remote classname *) - fld' (* remote fieldname *))::(set_refs fs) - | _::fs -> set_refs fs in - - let setrefs = set_refs obj_fields in - - let sr_fields = - List.map (fun (_,local_fieldname,remote_classname,remote_fieldname)-> - (local_fieldname, - get_set_ref remote_classname remote_fieldname objref)) setrefs in - (fvlist, sr_fields)) - + let db = get_database () in + let tbl = TableSet.find tblname (Database.tableset db) in + let row = Table.find_exn tblname objref tbl in + let fvlist = Row.fold (fun k d env -> (k,d)::env) row [] in + (* Unfortunately the interface distinguishes between Set(Ref _) types and + ordinary fields *) + let schema = Schema.table tblname (Database.schema db) in + let set_ref = List.filter (fun (k, _) -> + try + let column = Schema.Table.find k schema in + column.Schema.Column.issetref + with Not_found as e -> + Printf.printf "Failed to find table %s in schema\n%!" 
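(* Aside: the illegal-query fallback above is a plain fold over the table,
   projecting one field from every row that matches the predicate. The
   same scan over stdlib maps (the labels are illustrative): *)
module SMap = Map.Make (String)
let field_where tbl ~where_field ~where_value ~return =
  SMap.fold
    (fun _ row acc ->
      if SMap.find where_field row = where_value
      then SMap.find return row :: acc
      else acc)
    tbl []
let () =
  let tbl =
    SMap.singleton "r1"
      (SMap.add "power" "Running" (SMap.singleton "uuid" "u1")) in
  assert (field_where tbl ~where_field:"power" ~where_value:"Running"
            ~return:"uuid" = [ "u1" ])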
k; + raise e + ) fvlist in + (* the set_ref fields must be converted back into lists *) + let set_ref = List.map (fun (k, v) -> + k, String_unmarshall_helper.set (fun x -> x) v) set_ref in + (fvlist, set_ref)) + (* Delete row from tbl *) let delete_row tblname objref = - let tbl = lookup_table_in_cache cache tblname in - (* Look up the row first: in the event it doesn't exist, this will - immediately failed with a DBCache_NotFound *) - let (_: row) = lookup_row_in_table tbl tblname objref in - (* NB we generate the delete event BEFORE deleting the object - but then generate the mod events afterwards *) - let generate_delete_event () = - match Eventgen.find_get_record tblname ~__context:context ~self:objref () with - | None -> - error "Failed to generate DEL event for %s %s" tblname objref; - Printf.printf "Failed to generate DEL event for %s %s\n%!" tblname objref; - | Some snapshot -> - events_notify ~snapshot tblname "del" objref in - (* Return a thunk which will cause the mod events to be generated - containing the object states at the time the thunk is evaluated. - We create this closure while the objref is still valid *) - let lazily_generate_mod_events () = - let other_tbl_refs = Eventgen.follow_references tblname in - let other_tbl_refs = - List.fold_left (fun accu (remote_tbl,fld) -> - let (kv,_) = read_record tblname objref in - let fld_value = List.assoc fld kv in - if is_valid_ref fld_value - then (remote_tbl, fld_value, Eventgen.find_get_record remote_tbl ~__context:context ~self:fld_value) :: accu - else accu) - [] other_tbl_refs in - fun () -> - let other_tbl_ref_events = Eventgen.events_of_other_tbl_refs other_tbl_refs in - List.iter (function - | tbl, ref, None -> - error "Failed to generate MOD event on %s %s" tbl ref; - Printf.printf "Failed to generate MOD event on %s %s\n%!" tbl ref; - | tbl, ref, Some s -> - events_notify ~snapshot:s tbl "mod" ref - ) other_tbl_ref_events in with_lock (fun () -> W.debug "delete_row %s (%s)" tblname objref; - (* send event *) - generate_delete_event(); - let mod_events = lazily_generate_mod_events () in - - invalidate_indexes tblname; - - remove_row_from_table tbl objref; - - (* Notify each db connection of delete *) - List.iter (fun dbconn->Backend_xml.notify_delete dbconn tblname objref) (Db_conn_store.read_db_connections()); + + let db = get_database () in + let tbl = TableSet.find tblname (Database.tableset db) in + let row = Table.find_exn tblname objref tbl in - if (this_table_persists tblname) then - begin - (* Update cache dirty status *) - Db_dirty.clear_all_row_dirty_status objref; - Db_dirty.set_all_dirty_table_status tblname; - Db_cache_types.increment Db_backend.cache; - save_in_redo_log context (Redo_log.DeleteRow(tblname, objref)) - end; - Ref_index.remove objref; - remove_ref_from_table_map objref; - (* send the rest of the events *) - mod_events ()) + Database.notify (PreDelete(tblname, objref)) db; + update_database (remove_row_from_table tblname objref); + Database.notify (Delete(tblname, objref, Row.fold (fun k v acc -> (k, v) :: acc) row [])) db; + if Schema.is_table_persistent (Database.schema db) tblname + then update_database Database.increment; + ) (* Create new row in tbl containing specified k-v pairs *) -let create_row tblname kvs new_objref = +let create_row tblname kvs' new_objref = (* Ensure values are valid for UTF-8-encoded XML. 
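(* Aside: delete_row above fires PreDelete while the row still exists,
   removes the row, then fires Delete carrying the values captured before
   removal, so listeners never need to re-read a vanished row. A minimal
   single-table model (string-valued rows are an assumption of the
   sketch): *)
module SMap = Map.Make (String)
type event =
  | Pre_delete of string
  | Delete of string * string (* ref and its last contents *)
let delete notify tbl objref =
  let last = SMap.find objref tbl in (* snapshot taken before removal *)
  notify (Pre_delete objref); (* the row is still visible here *)
  let tbl = SMap.remove objref tbl in
  notify (Delete (objref, last)); (* the event carries the snapshot *)
  tbl
let () =
  let tbl = SMap.singleton "OpaqueRef:1" "row-contents" in
  assert (not (SMap.mem "OpaqueRef:1" (delete (fun _ -> ()) tbl "OpaqueRef:1")))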
*) - let kvs = List.map (fun (key, value) -> (key, ensure_utf8_xml value)) kvs in - - (* fill in default values specifed in datamodel if kv pairs for these are not supplied already *) - let kvs = add_default_kvs kvs tblname in - - (* add the reference to the row itself *) - let kvs = (reference, new_objref) :: kvs in - - let generate_create_event() = - let snapshot = Eventgen.find_get_record tblname ~__context:context ~self:new_objref in - let other_tbl_refs = Eventgen.follow_references tblname in - let other_tbl_refs = - List.fold_left (fun accu (tbl,fld) -> - let fld_value = List.assoc fld kvs in - if is_valid_ref fld_value - then (tbl, fld_value, Eventgen.find_get_record tbl ~__context:context ~self:fld_value) :: accu - else accu) - [] other_tbl_refs in - let other_tbl_events = Eventgen.events_of_other_tbl_refs other_tbl_refs in - begin match snapshot() with - | None -> - error "Failed to generate ADD event for %s %s" tblname new_objref; - Printf.printf "Failed to generate ADD event for %s %s\n%!" tblname new_objref; - | Some snapshot -> - events_notify ~snapshot tblname "add" new_objref; - end; - List.iter (function - | tbl, ref, None -> - error "Failed to generate MOD event for %s %s" tbl ref; - Printf.printf "Failed to generate MOD event for %s %s\n%!" tbl ref; - | tbl, ref, Some s -> - events_notify ~snapshot:s tbl "mod" ref - ) other_tbl_events in + let kvs' = List.map (fun (key, value) -> (key, ensure_utf8_xml value)) kvs' in + + (* we add the reference to the row itself so callers can use read_field_where to + return the reference: awkward if it is just the key *) + let kvs' = (Db_names.ref, new_objref) :: kvs' in + + let row = List.fold_left (fun row (k, v) -> Row.add k v row) Row.empty kvs' in + let schema = Schema.table tblname (Database.schema (get_database ())) in + (* fill in default values if kv pairs for these are not supplied already *) + let row = Row.add_defaults schema row in with_lock (fun () -> - W.debug "create_row %s (%s) [%s]" tblname new_objref (String.concat "," (List.map (fun (k,v)->"("^k^","^"v"^")") kvs)); - invalidate_indexes tblname; - let newrow = table_of_kvs kvs in - let tbl = lookup_table_in_cache cache tblname in - check_unique_table_constraints tblname newrow; - set_row_in_table tbl new_objref newrow; - if (this_table_persists tblname) then - begin - Db_dirty.set_all_row_dirty_status new_objref Db_dirty.New; - Db_dirty.set_all_dirty_table_status tblname; - Db_cache_types.increment Db_backend.cache; - save_in_redo_log context (Redo_log.CreateRow(tblname, new_objref, kvs)) - end; - add_ref_to_table_map new_objref tblname (* track ref against this table *); - let uuid = lookup_field_in_row newrow uuid_fname in - let name_label = try Some (lookup_field_in_row newrow name_label_fname) with _ -> None in - Ref_index.insert {Ref_index.name_label = name_label; Ref_index.uuid = uuid; Ref_index._ref = new_objref }; - - (* generate events *) - begin - try - generate_create_event(); - with Not_found -> - error "Failed to send a create event for %s %s" tblname new_objref - end + W.debug "create_row %s (%s) [%s]" tblname new_objref (String.concat "," (List.map (fun (k,v)->"("^k^","^"v"^")") kvs')); + let db = get_database () in + let tbl = TableSet.find tblname (Database.tableset db) in + update_database (set_row_in_table tblname new_objref row); + + Database.notify (Create(tblname, new_objref, Row.fold (fun k v acc -> (k, v) :: acc) row [])) db; + + if Schema.is_table_persistent (Database.schema db) tblname + then update_database Database.increment; ) (* Do linear 
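(* Aside: create_row above completes the supplied key-value pairs with
   schema defaults via Row.add_defaults. The behaviour, restated with a
   toy column type (cname / default are illustrative names; the real code
   raises DBCache_NotFound rather than Failure): *)
module SMap = Map.Make (String)
type column = { cname : string; default : string option }
let add_defaults columns row =
  List.fold_left
    (fun row c ->
      if SMap.mem c.cname row then row
      else
        match c.default with
        | Some d -> SMap.add c.cname d row
        | None -> failwith ("missing field: " ^ c.cname))
    row columns
let () =
  let row =
    add_defaults [ { cname = "name_label"; default = Some "" } ] SMap.empty in
  assert (SMap.find "name_label" row = "")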
scan to find field values which match where clause *) let read_field_where rcd = with_lock (fun () -> - let tbl = lookup_table_in_cache cache rcd.table in - let rec do_find tbl acc = - match tbl with - [] -> acc - | (r::rs) -> - let fv = lookup_field_in_row r rcd.where_field in - if fv=rcd.where_value then do_find rs ((lookup_field_in_row r rcd.return)::acc) - else do_find rs acc in - let rows = get_rowlist tbl in - do_find rows [] + let db = get_database () in + let tbl = TableSet.find rcd.table (Database.tableset db) in + Table.fold + (fun r row acc -> + let field = Row.find rcd.where_field row in + if field = rcd.where_value then Row.find rcd.return row :: acc else acc + ) tbl [] ) let db_get_by_uuid tbl uuid_val = match (read_field_where - {table=tbl; return=reference; - where_field=uuid; where_value=uuid_val}) with + {table=tbl; return=Db_names.ref; + where_field=Db_names.uuid; where_value=uuid_val}) with | [] -> raise (Read_missing_uuid (tbl, "", uuid_val)) | [r] -> r | _ -> raise (Too_many_values (tbl, "", uuid_val)) @@ -384,7 +222,7 @@ let db_get_by_uuid tbl uuid_val = (** Return reference fields from tbl that matches specified name_label field *) let db_get_by_name_label tbl label = read_field_where - {table=tbl; return=reference; + {table=tbl; return=Db_names.ref; where_field=(Escaping.escape_id ["name"; "label"]); where_value=label} @@ -392,19 +230,25 @@ let db_get_by_name_label tbl label = let read_refs tblname = with_lock (fun () -> - get_reflist (lookup_table_in_cache cache tblname)) + let db = get_database () in + let tbl = TableSet.find tblname (Database.tableset db) in + Table.fold (fun r _ acc -> r :: acc) tbl []) -(* Return a list of all the references for which the expression returns true. *) +(* Return a list of all the refs for which the expression returns true. *) let find_refs_with_filter (tblname: string) (expr: Db_filter_types.expr) = with_lock (fun ()-> - let tbl = lookup_table_in_cache cache tblname in - let rows = get_rowlist tbl in + let db = get_database () in + let tbl = TableSet.find tblname (Database.tableset db) in let eval_val row = function | Db_filter_types.Literal x -> x - | Db_filter_types.Field x -> lookup_field_in_row row x in - let rows = List.filter (fun row ->Db_filter.eval_expr (eval_val row) expr) rows in - List.map (fun row -> lookup_field_in_row row reference_fname) rows) + | Db_filter_types.Field x -> Row.find x row in + Table.fold + (fun r row acc -> + if Db_filter.eval_expr (eval_val row) expr + then Row.find Db_names.ref row :: acc else acc + ) tbl [] + ) let read_records_where tbl expr = with_lock @@ -412,108 +256,70 @@ let read_records_where tbl expr = let reqd_refs = find_refs_with_filter tbl expr in List.map (fun ref->ref, read_record tbl ref) reqd_refs ) - -let process_structured_field (key,value) tbl fld objref proc_fn_selector = + +let process_structured_field (key,value) tblname fld objref proc_fn_selector = (* Ensure that both keys and values are valid for UTF-8-encoded XML. 
*) let key = ensure_utf8_xml key in let value = ensure_utf8_xml value in - let add_set = (fun fv->add_key_to_set key fv) in - let remove_set = (fun fv->List.filter (function SExpr.String x -> x <> key | _ -> true) fv) in - let add_map = (fun fv-> - let kv = SExpr.Node [ SExpr.String key; SExpr.String value ] in - let duplicate = List.fold_left (||) false - (List.map (function SExpr.Node (SExpr.String k :: _) when k = key -> true - | _ -> false) fv) in - if duplicate then begin - error "Duplicate key in set or map: table %s; field %s; ref %s; key %s" tbl fld objref key; - raise (Duplicate_key (tbl,fld,objref,key)); - end; - kv::fv) in - let remove_map = - (fun fv->List.filter (function SExpr.Node [ SExpr.String x; _ ] -> x <> key - | _ -> true) fv) in - let proc_fn = - begin - match proc_fn_selector with - AddSet -> add_set - | RemoveSet -> remove_set - | AddMap -> add_map - | RemoveMap -> remove_map - end in with_lock (fun () -> - let row = find_row cache tbl objref in - let existing_str = lookup_field_in_row row fld in - let existing = parse_sexpr existing_str in - let processed = proc_fn existing in - let processed_str = SExpr.string_of (SExpr.Node processed) in - write_field tbl objref fld processed_str) - + let db = get_database () in + let tbl = TableSet.find tblname (Database.tableset db) in + let row = Table.find_exn tblname objref tbl in + let existing_str = Row.find fld row in + let new_str = match proc_fn_selector with + | AddSet -> add_to_set key existing_str + | RemoveSet -> remove_from_set key existing_str + | AddMap -> + begin + try + add_to_map key value existing_str + with Duplicate -> + error "Duplicate key in set or map: table %s; field %s; ref %s; key %s" tblname fld objref key; + raise (Duplicate_key (tblname,fld,objref,key)); + end + | RemoveMap -> remove_from_map key existing_str in + write_field tblname objref fld new_str) (* -------------------------------------------------------------------- *) - -(* Executed on the master to post-process database after populating cache from db stored on disk *) -let post_populate_hook () = - (* Remove the temporary file used for staging from the metadata LUN -- - * there's no need to keep it and it's preferable for it not to hang - * around. *) - Unixext.unlink_safe Xapi_globs.ha_metadata_db; - (* non-persistent fields will have been flushed to disk anyway [since non-persistent just means dont trigger a flush - if I change]. Hence we blank non-persistent fields with a suitable empty value, depending on their type *) - Db_backend.blow_away_non_persistent_fields(); - (* Flush the in-memory cache to the redo-log *) - Backend_xml.flush_db_to_redo_log Db_backend.cache - -let populate_cache () = - let connections = Db_conn_store.read_db_connections () in +let load connections default_schema = - (* Include a fake connection representing the HA metadata db - (if available). This isn't a full flushing connection per-se but - is only considered as a population source. 
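(* Aside: process_structured_field above now delegates to the string-level
   helpers add_to_set, remove_from_set, add_to_map and remove_from_map.
   Semantically they behave like this assoc-list model, except that the
   real helpers parse and re-print the field as an s-expression string: *)
exception Duplicate
let add_to_map k v kvs =
  if List.mem_assoc k kvs then raise Duplicate;
  (k, v) :: kvs
let remove_from_map k kvs = List.remove_assoc k kvs
let () =
  assert (remove_from_map "a" (add_to_map "a" "1" []) = []);
  assert (try ignore (add_to_map "a" "2" [ ("a", "1") ]); false
          with Duplicate -> true)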
*) - let fake_ha_dbconn = { Parse_db_conf.dummy_conf with - Parse_db_conf.path = Xapi_globs.ha_metadata_db } in - let connections = - if Sys.file_exists Xapi_globs.ha_metadata_db - then fake_ha_dbconn :: connections else connections in - - let fake_gen_dbconn = { Parse_db_conf.dummy_conf with - Parse_db_conf.path = Xapi_globs.gen_metadata_db } in - let connections = - if Sys.file_exists Xapi_globs.gen_metadata_db - then fake_gen_dbconn :: connections else connections in + (* We also consider populating from the HA metadata LUN and the general metadata LUN *) + let connections = + Parse_db_conf.make Xapi_globs.ha_metadata_db :: + (Parse_db_conf.make Xapi_globs.gen_metadata_db) :: connections in (* If we have a temporary_restore_path (backup uploaded in previous run of xapi process) then restore from that *) - let db = - if Sys.file_exists Xapi_globs.db_temporary_restore_path then begin - (* we know that the backup is XML format so, to get the manifest, we jump right in and use the xml backend directly here.. *) - let manifest = Backend_xml.populate_and_read_manifest Parse_db_conf.backup_file_dbconn in - Db_backend.post_restore_hook manifest; - (* delete file that contained backup *) - Db_backend.try_and_delete_db_file Xapi_globs.db_temporary_restore_path; - Parse_db_conf.backup_file_dbconn - end - else (* if there's no backup to restore from then.. *) - begin - (* Check schema vsn is current; if not try and upgrade; if can't do that then fail startup.. *) - let most_recent_db = Db_connections.pick_most_recent_db connections in - (* populate gets all field names from the existing (old) db file, not the (current) schema... which is nice: *) - Backend_xml.populate most_recent_db; - most_recent_db - end in - (* Always perform the generic database upgrade stuff *) - Db_upgrade.generic_database_upgrade (); - - (* Then look to see whether we have specific upgrade rules to consider *) - if Sys.file_exists db.Parse_db_conf.path then Db_upgrade.maybe_upgrade db; - - post_populate_hook () + let populate db = + Printf.printf "populate\n%!"; + let backup = Parse_db_conf.make Xapi_globs.db_temporary_restore_path in + match Db_connections.choose [ backup ] with + | Some c -> Db_backend.post_restore_hook (Backend_xml.populate default_schema c) + | None -> + begin match Db_connections.choose connections with + | Some c -> Backend_xml.populate default_schema c + | None -> db (* empty *) + end in + + let empty = Database.update_manifest (Manifest.update_schema (fun _ -> Some (default_schema.Schema.major_vsn, default_schema.Schema.minor_vsn))) (Database.make default_schema) in + + let db = + ((Db_backend.blow_away_non_persistent_fields default_schema) + ++ Db_upgrade.maybe_upgrade + ++ Db_upgrade.generic_database_upgrade + ++ populate) empty in -let sync_all_db_connections() = - (* Unconditionally force-flush all databases. *) - List.iter Db_connections.force_flush_all (List.map snd (Db_connections.get_dbs_and_gen_counts())) + db + + +let sync conns db = + (* Flush the in-memory cache to the redo-log *) + Redo_log.flush_db_to_redo_log db; + (* and then to the filesystem *) + List.iter (fun c -> Db_connections.flush c db) conns let flush_dirty dbconn = Db_connections.flush_dirty_and_maybe_exit dbconn None let flush_and_exit dbconn ret_code = ignore (Db_connections.flush_dirty_and_maybe_exit dbconn (Some ret_code)) @@ -594,30 +400,26 @@ let spawn_db_flush_threads() = (* Called by server at start-of-day to initialiase cache. 
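(* Aside: load above assembles the start-of-day pipeline by composing pure
   Database.t -> Database.t phases and applying them to an empty database.
   With (++), the rightmost phase (populate) runs first and the leftmost
   runs last. A trace-producing sketch with abbreviated phase names: *)
let ( ++ ) f g x = f (g x)
let phase name trace = trace @ [ name ]
let () =
  let trace = (phase "blow_away" ++ phase "upgrade" ++ phase "populate") [] in
  (* the trace records execution order *)
  assert (trace = [ "populate"; "upgrade"; "blow_away" ])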
Populates cache and starts flushing threads *) -let initialise () = - populate_cache(); - sync_all_db_connections(); - spawn_db_flush_threads() - -(* entry point for xapi-db-process; initialises a db cache without syncing all db connections "to tip" *) -let initialise_db_cache_nosync() = - populate_cache(); +let make connections default_schema = + let db = load connections default_schema in + let db = Database.reindex db in + update_database (fun _ -> db); + spawn_db_flush_threads() -let dump_db_cache fd = - let db_cache_manifest = Db_cache_types.manifest_of_cache Db_backend.cache in - let time = Unix.gettimeofday() in - (* Snapshot the cache (uses the lock) and then slowly serialise the copy *) - Db_xml.To.fd fd (db_cache_manifest, snapshot Db_backend.cache); - debug "Written xml to fd: (time %f)" (Unix.gettimeofday() -. time) - + (** Return an association list of table name * record count *) let stats () = with_lock (fun () -> - fold_over_tables (fun name tbl acc -> - let size = fold_over_rows (fun _ _ acc -> acc + 1) tbl 0 in - (name, size) :: acc) Db_backend.cache []) + TableSet.fold (fun name tbl acc -> + let size = Table.fold (fun _ _ acc -> acc + 1) tbl 0 in + (name, size) :: acc) + (Database.tableset (Db_backend.get_database ())) + [] + ) +(* Only needed by the DB_ACCESS signature *) +let initialise () = () diff --git a/ocaml/database/db_cache_impl.mli b/ocaml/database/db_cache_impl.mli index 71c1db9a..070a4225 100644 --- a/ocaml/database/db_cache_impl.mli +++ b/ocaml/database/db_cache_impl.mli @@ -1,19 +1,14 @@ include Db_interface.DB_ACCESS -(** [initialise ()] initialises the in-memory cache *) -val initialise : unit -> unit +(** [make connections default_schema] initialises the in-memory cache *) +val make : Parse_db_conf.db_connection list -> Schema.t -> unit (** [flush_and_exit db code] flushes the specific backend [db] and exits xapi with [code] *) val flush_and_exit : Parse_db_conf.db_connection -> int -> unit -(** [initialise_db_cache_nosync ()] is the same as [initialise ()] without - the side-effect of writing to any database files *) -val initialise_db_cache_nosync : unit -> unit - -(** [dump_db_cache fd] writes a snapshot of the database to file descriptor - [fd] *) -val dump_db_cache : Unix.file_descr -> unit +(** [sync db] forcibly flushes the database to disk *) +val sync : Parse_db_conf.db_connection list -> Db_cache_types.Database.t -> unit (** [stats ()] returns some stats data for logging *) val stats : unit -> (string * int) list diff --git a/ocaml/database/db_cache_types.ml b/ocaml/database/db_cache_types.ml index 6f51f326..28c37989 100644 --- a/ocaml/database/db_cache_types.ml +++ b/ocaml/database/db_cache_types.ml @@ -1,152 +1,364 @@ -(* - * Copyright (C) 2006-2009 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. 
- *) open Db_exn -type row = (string, string) Hashtbl.t -type table = (string, row) Hashtbl.t -type cache = { - cache: (string, table) Hashtbl.t; - schema: (int * int) option ref; - generation: Generation.t ref; -} +(** Database tables, columns and rows are all indexed by string, each + using a specialised StringMap *) +module StringMap = struct + include Map.Make(struct + type t = string + let compare = Pervasives.compare + end) + let update key default f t = + let v = try find key t with Not_found -> default in + add key (f v) t +end -type where_record = {table:string; return:string; where_field:string; where_value:string} with rpc -type structured_op_t = AddSet | RemoveSet | AddMap | RemoveMap with rpc +module type VAL = sig + type v +end +(** A specialised StringMap whose range type is V.v *) +module Map2 = functor(V: VAL) -> struct + type t = V.v StringMap.t + let empty = StringMap.empty + let fold = StringMap.fold + let add = StringMap.add + let find = StringMap.find + let mem = StringMap.mem + let iter = StringMap.iter + let remove = StringMap.remove + let update = StringMap.update +end -let string_of_structured_op op = match op with - | AddSet -> "add_set" - | RemoveSet -> "remove_set" - | AddMap -> "add_map" - | RemoveMap -> "remove_map" +module StringStringMap = Map2(struct type v = string end) -type db_dump_manifest = - { - schema_major_vsn : int; - schema_minor_vsn : int; - generation_count : Generation.t - } - -let make_manifest schema_major_vsn schema_minor_vsn gen_count = - { - schema_major_vsn = schema_major_vsn; - schema_minor_vsn = schema_minor_vsn; - generation_count = gen_count - } - -let schema_of_cache cache = match !(cache.schema) with -| None -> (0, 0) -| Some (major, minor) -> major, minor - -let manifest_of_cache cache = - let major, minor = schema_of_cache cache in - make_manifest major minor !(cache.generation) - -let set_schema_vsn cache (major, minor) = cache.schema := Some (major, minor) - -let increment cache = cache.generation := Int64.add !(cache.generation) 1L - -let generation_of_cache cache = !(cache.generation) - -let set_generation cache generation = cache.generation := generation - -(* Our versions of hashtbl.find *) -let lookup_field_in_row row fld = - try - Hashtbl.find row fld - with Not_found -> raise (DBCache_NotFound ("missing field",fld,"")) - -let lookup_table_in_cache cache tbl = - try - Hashtbl.find cache.cache tbl - with Not_found -> raise (DBCache_NotFound ("missing table",tbl,"")) - -let lookup_row_in_table tbl tblname objref = - try - Hashtbl.find tbl objref - with Not_found -> raise (DBCache_NotFound ("missing row",tblname,objref)) - -let iter_over_rows func table = - Hashtbl.iter func table - -let iter_over_tables func cache = - Hashtbl.iter func cache.cache - -let iter_over_fields func row = - Hashtbl.iter func row - -let set_field_in_row row fldname newval = - Hashtbl.replace row fldname newval - -let set_row_in_table table objref newval = - Hashtbl.replace table objref newval - -let set_table_in_cache cache tblname newtbl = - Hashtbl.replace cache.cache tblname newtbl - -let create_empty_row () = Hashtbl.create 20 - -let create_empty_table () = Hashtbl.create 20 - -let create_empty_cache () = { cache = Hashtbl.create 20; schema = ref None; generation = ref Generation.null_generation } - -let fold_over_fields func row acc = Hashtbl.fold func row acc - -let fold_over_rows func table acc = Hashtbl.fold func table acc - -let fold_over_tables func cache acc = Hashtbl.fold func cache.cache acc - -let remove_row_from_table tbl objref = 
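(* Aside: the update function added to StringMap above (find with a
   default, transform, re-add) is the workhorse behind the nested setters
   later in this file. Restated over a stdlib map; note that the stdlib's
   own Map.S.update (OCaml >= 4.06) instead passes the binding as an
   option. *)
module SMap = Map.Make (String)
let update key default f m =
  let v = try SMap.find key m with Not_found -> default in
  SMap.add key (f v) m
let () =
  let m = update "counter" 0 succ SMap.empty in
  assert (SMap.find "counter" m = 1)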
Hashtbl.remove tbl objref - -let get_rowlist tbl = - fold_over_rows (fun k d env -> d::env) tbl [] - -let get_reflist tbl = - fold_over_rows (fun k d env -> k::env) tbl [] - -(* Find row with specified reference in specified table *) -let find_row cache (tblname:string) (objref:string) : row = - let tbl = lookup_table_in_cache cache tblname in - lookup_row_in_table tbl tblname objref - -(* Read column, fname, from database rows: *) -let get_column cache tblname fname = - let rec f rows col_so_far = - match rows with - [] -> col_so_far - | (r::rs) -> - let value = try Some (lookup_field_in_row r fname) with _ -> None in - match value with - None -> f rs col_so_far - | (Some u) -> f rs (u::col_so_far) in - f (get_rowlist (lookup_table_in_cache cache tblname)) [] - -(** Return a snapshot of the database cache suitable for slow marshalling across the network *) -let snapshot cache : cache = - Db_lock.with_lock - (fun () -> - let row table rf vals = - let newrow = create_empty_row () in - iter_over_fields (set_field_in_row newrow) vals; - set_row_in_table table rf newrow in - - let table cache name tbl = - let newtable = create_empty_table () in - iter_over_rows (row newtable) tbl; - set_table_in_cache cache name newtable in - - let newcache = create_empty_cache () in - iter_over_tables (table newcache) cache; +module type ROW = sig + type t + val add: string -> string -> t -> t + val add_defaults: Schema.Table.t -> t -> t + val empty : t + val fold : (string -> string -> 'b -> 'b) -> t -> 'b -> 'b + val find : string -> t -> string + val mem : string -> t -> bool + val iter : (string -> string -> unit) -> t -> unit + val remove : string -> t -> t + val update : string -> string -> (string -> string) -> t -> t +end + +module Row : ROW = struct + include StringStringMap + let find key t = + try find key t + with Not_found -> raise (DBCache_NotFound ("missing field", key, "")) + let add_defaults (schema: Schema.Table.t) t = + List.fold_left (fun t c -> + if not(mem c.Schema.Column.name t) + then match c.Schema.Column.default with + | Some default -> add c.Schema.Column.name default t + | None -> raise (DBCache_NotFound ("missing field", c.Schema.Column.name, "")) + else t) t schema.Schema.Table.columns +end + +module StringRowMap = Map2(struct type v = Row.t end) + +module type TABLE = sig + type t + val add: string -> Row.t -> t -> t + val empty : t + val fold : (string -> Row.t -> 'b -> 'b) -> t -> 'b -> 'b + val find_exn : string -> string -> t -> Row.t + val find : string -> t -> Row.t + val mem : string -> t -> bool + val iter : (string -> Row.t -> unit) -> t -> unit + val remove : string -> t -> t + val update : string -> Row.t -> (Row.t -> Row.t) -> t -> t + + val rows : t -> Row.t list +end + +module Table : TABLE = struct + include StringRowMap + let find_exn tbl key t = + try find key t + with Not_found -> raise (DBCache_NotFound ("missing row", tbl, key)) + let rows t = + fold (fun _ r rs -> r :: rs) t [] +end + +module StringTableMap = Map2(struct type v = Table.t end) + +module type TABLESET = sig + type t + val add: string -> Table.t -> t -> t + val empty : t + val fold : (string -> Table.t -> 'b -> 'b) -> t -> 'b -> 'b + val find : string -> t -> Table.t + val mem : string -> t -> bool + val iter : (string -> Table.t -> unit) -> t -> unit + val remove : string -> t -> t + val update : string -> Table.t -> (Table.t -> Table.t) -> t -> t +end + +module TableSet : TABLESET = struct + include StringTableMap + let find key t = + try find key t + with Not_found -> raise (DBCache_NotFound 
("missing table", key, "")) +end + +type common_key = + | Ref of string + | Uuid of string +let string_of_common_key = function + | Ref x -> x + | Uuid x -> x + +module KeyMap = struct + include Map.Make(struct + type t = common_key + let compare = Pervasives.compare + end) + let add_unique tblname fldname k v t = + if mem k t + then raise (Uniqueness_constraint_violation ( tblname, fldname, string_of_common_key k )) + else add k v t +end + + +module Manifest = struct + type t = { + schema : (int * int) option; + generation_count : Generation.t + } + + let empty = { + schema = None; generation_count = Generation.null_generation + } + + let make schema_major_vsn schema_minor_vsn gen_count = { + schema = Some (schema_major_vsn, schema_minor_vsn); + generation_count = gen_count + } + + let generation x = x.generation_count + + let update_generation f x = { + x with generation_count = f x.generation_count + } + + let next = update_generation (Int64.add 1L) + + let schema x = match x.schema with + | None -> 0, 0 + | Some (x, y) -> x, y + + let update_schema f x = { + x with schema = f x.schema + } +end + +(** The core database updates (PreDelete is more of an 'event') *) +type update = + | WriteField of string (* tblname *) * string (* objref *) * string (* fldname *) * string (* oldval *) * string (* newval *) + | PreDelete of string (* tblname *) * string (* objref *) + | Delete of string (* tblname *) * string (* objref *) * (string * string) list (* values *) + | Create of string (* tblname *) * string (* objref *) * (string * string) list (* values *) + +module Database = struct + type t = { + tables: TableSet.t; + manifest : Manifest.t; + schema: Schema.t; + keymap: (string * string) KeyMap.t; + callbacks: (string * (update -> t -> unit)) list + } + let update_manifest f x = + { x with manifest = f x.manifest } + + let manifest x = x.manifest + + let increment = update_manifest Manifest.next + + let tableset x = x.tables + + let schema x = x.schema + + let update f x = + { x with tables = f x.tables } + + let set_generation g = + update_manifest (Manifest.update_generation (fun _ -> g)) + + let update_tableset f x = + { x with tables = f x.tables } + + let update_keymap f x = + { x with keymap = f x.keymap } + + let register_callback name f x = + { x with callbacks = (name, f) :: x.callbacks } + + let unregister_callback name x = + { x with callbacks = List.filter (fun (x, _) -> x <> name) x.callbacks } + + let notify e db = + List.iter (fun (name, f) -> + try + f e db + with e -> + Printf.printf "Caught %s from database callback '%s'\n%!" 
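(* Aside: register_callback / notify above are the hook points the commit
   message promises for the redo-log and the event system; a callback that
   raises is logged and skipped so it cannot poison the write path. The
   pattern in isolation, with the event and database types collapsed to a
   string (the real registry is immutable and lives inside Database.t): *)
let callbacks : (string * (string -> unit)) list ref = ref []
let register name f = callbacks := (name, f) :: !callbacks
let unregister name =
  callbacks := List.filter (fun (n, _) -> n <> name) !callbacks
let notify event =
  List.iter
    (fun (name, f) ->
      try f event
      with e ->
        Printf.printf "callback %s failed: %s\n%!" name (Printexc.to_string e))
    !callbacks
let () =
  register "logger" (fun e -> print_endline e);
  register "broken" (fun _ -> failwith "boom");
  notify "create VM" (* the broken callback is contained; logger still runs *)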
(Printexc.to_string e) name; + () + ) db.callbacks + + let reindex x = + (* Recompute the keymap *) + let keymap = + TableSet.fold + (fun tblname tbl acc -> + Table.fold + (fun rf row acc -> + let acc = KeyMap.add_unique tblname Db_names.ref (Ref rf) (tblname, rf) acc in + if Row.mem Db_names.uuid row + then KeyMap.add_unique tblname Db_names.uuid (Uuid (Row.find Db_names.uuid row)) (tblname, rf) acc + else acc + ) + tbl acc) + x.tables KeyMap.empty in + + { x with keymap = keymap } + + + let table_of_ref rf db = fst (KeyMap.find (Ref rf) db.keymap) + let lookup_key key db = + if KeyMap.mem (Ref key) db.keymap + then Some (KeyMap.find (Ref key) db.keymap) + else + if KeyMap.mem (Uuid key) db.keymap + then Some (KeyMap.find (Uuid key) db.keymap) + else None + + let make schema = { + tables = TableSet.empty; + manifest = Manifest.empty; + schema = schema; + keymap = KeyMap.empty; + callbacks = []; + } +end + +(* Helper functions to deal with Sets and Maps *) +let add_to_set key t = + let existing = Db_action_helper.parse_sexpr t in + let processed = Db_action_helper.add_key_to_set key existing in + SExpr.string_of (SExpr.Node processed) + +let remove_from_set key t = + let existing = Db_action_helper.parse_sexpr t in + let processed = List.filter (function SExpr.String x -> x <> key | _ -> true) existing in + SExpr.string_of (SExpr.Node processed) + +exception Duplicate +let add_to_map key value t = + let existing = Db_action_helper.parse_sexpr t in + let kv = SExpr.Node [ SExpr.String key; SExpr.String value ] in + let duplicate = List.fold_left (||) false + (List.map (function SExpr.Node (SExpr.String k :: _) when k = key -> true + | _ -> false) existing) in + if duplicate then raise Duplicate; + let processed = kv::existing in + SExpr.string_of (SExpr.Node processed) + +let remove_from_map key t = + let existing = Db_action_helper.parse_sexpr t in + let processed = List.filter (function SExpr.Node [ SExpr.String x; _ ] -> x <> key + | _ -> true) existing in + SExpr.string_of (SExpr.Node processed) + + +let (++) f g x = f (g x) +let id x = x + +let set_table tblname newval = + (Database.update ++ (TableSet.update tblname Table.empty)) + (fun _ -> newval) + +let get_field tblname objref fldname db = + Row.find fldname (Table.find objref (TableSet.find tblname (Database.tableset db))) +let set_field tblname objref fldname newval = + ((Database.update + ++ (TableSet.update tblname Table.empty) + ++ (Table.update objref Row.empty) + ++ (Row.update fldname "")) + (fun _ -> newval)) + +let update_one_to_many tblname objref f db = + List.fold_left (fun db (one_fld, many_tbl, many_fld) -> + (* the value one_fld_val is the Ref _ *) + let one_fld_val = get_field tblname objref one_fld db in + let valid = try ignore(Database.table_of_ref one_fld_val db); true with _ -> false in + if valid + then set_field many_tbl one_fld_val many_fld (f objref (get_field many_tbl one_fld_val many_fld db)) db + else db + ) db (Schema.one_to_many tblname (Database.schema db)) + +let set_row_in_table tblname objref newval = + id + (* For any field in a one-to-many, add objref to the foreign Set(Ref_) fields *) + (* NB this requires the new row to exist already *) + ++ (update_one_to_many tblname objref add_to_set) + ++ ((Database.update ++ (TableSet.update tblname Table.empty) ++ (Table.update objref Row.empty)) + (fun _ -> newval)) + + ++ (Database.update_keymap (KeyMap.add_unique tblname Db_names.ref (Ref objref) (tblname, objref))) + ++ (Database.update_keymap (fun m -> + if Row.mem Db_names.uuid newval + then 
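(* Aside: set_field above is built by composing one updater per layer of
   the database: Database.update selects the table set, TableSet.update
   the table, Table.update the row, Row.update the field, and the
   innermost function replaces the value. The same trick over stdlib maps
   (upd mirrors the StringMap.update defined earlier in this file): *)
module SMap = Map.Make (String)
let upd key default f m =
  let v = try SMap.find key m with Not_found -> default in
  SMap.add key (f v) m
let ( ++ ) f g x = f (g x)
let set_field tbl rf fld v ts =
  (upd tbl SMap.empty ++ upd rf SMap.empty ++ upd fld "") (fun _ -> v) ts
let () =
  let ts = set_field "VM" "OpaqueRef:1" "name_label" "demo" SMap.empty in
  assert (SMap.find "name_label"
            (SMap.find "OpaqueRef:1" (SMap.find "VM" ts)) = "demo")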
KeyMap.add_unique tblname Db_names.uuid (Uuid (Row.find Db_names.uuid newval)) (tblname, objref) m + else m)) + + + +let remove_row tblname objref uuid = + id + ++ ((Database.update ++ (TableSet.update tblname Table.empty)) + (Table.remove objref)) + (* For any field in a one-to-many, remove objref from the foreign Set(Ref_) fields *) + (* NB this requires the original row to still exist *) + ++ (update_one_to_many tblname objref remove_from_set) + + ++ (Database.update_keymap (KeyMap.remove (Ref objref))) + ++ (Database.update_keymap (fun m -> + match uuid with + | Some u -> KeyMap.remove (Uuid u) m + | None -> m)) + +let set_field_in_row tblname objref fldname newval db = + if fldname = Db_names.ref + then failwith (Printf.sprintf "Cannot safely update field: %s" fldname); + + let oldrow = Table.find objref (TableSet.find tblname (Database.tableset db)) in + let newrow = Row.add fldname newval oldrow in + let olduuid = try Some(Row.find Db_names.uuid oldrow) with _ -> None in + + ((set_row_in_table tblname objref newrow) + ++ (remove_row tblname objref olduuid)) db + +let remove_row_from_table tblname objref db = + let uuid = + try + Some (Row.find Db_names.uuid (Table.find objref (TableSet.find tblname (Database.tableset db)))) + with _ -> None in + remove_row tblname objref uuid db + + +type where_record = { + table: string; (** table from which ... *) + return: string; (** we'd like to return this field... *) + where_field: string; (** where this other field... *) + where_value: string; (** contains this value *) +} with rpc + +type structured_op_t = + | AddSet + | RemoveSet + | AddMap + | RemoveMap +with rpc - set_generation newcache (generation_of_cache cache); - newcache) diff --git a/ocaml/database/db_cache_types.mli b/ocaml/database/db_cache_types.mli index 58583e69..448db7be 100644 --- a/ocaml/database/db_cache_types.mli +++ b/ocaml/database/db_cache_types.mli @@ -1,68 +1,111 @@ -(* - * Copyright (C) 2006-2009 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. 
- *) -type row -type table -type cache +module Row : + sig + type t + val add : string -> string -> t -> t + val add_defaults : Schema.Table.t -> t -> t + val empty : t + val fold : (string -> string -> 'a -> 'a) -> t -> 'a -> 'a + val find : string -> t -> string + val iter : (string -> string -> unit) -> t -> unit + val remove : string -> t -> t + val update : string -> string -> (string -> string) -> t -> t + end -type where_record = { - table : string; - return : string; - where_field : string; - where_value : string; -} -val rpc_of_where_record: where_record -> Rpc.t -val where_record_of_rpc: Rpc.t -> where_record +module Table : + sig + type t + val add : string -> Row.t -> t -> t + val empty : t + val fold : (string -> Row.t -> 'a -> 'a) -> t -> 'a -> 'a + val find_exn : string -> string -> t -> Row.t + val find : string -> t -> Row.t + val iter : (string -> Row.t -> unit) -> t -> unit + val remove : string -> t -> t + val update : string -> Row.t -> (Row.t -> Row.t) -> t -> t -type structured_op_t = AddSet | RemoveSet | AddMap | RemoveMap -val rpc_of_structured_op_t: structured_op_t -> Rpc.t -val structured_op_t_of_rpc: Rpc.t -> structured_op_t + val rows : t -> Row.t list + end -type db_dump_manifest = { - schema_major_vsn : int; - schema_minor_vsn : int; - generation_count : Int64.t; -} +module TableSet : + sig + type t + val add : string -> Table.t -> t -> t + val empty : t + val fold : (string -> Table.t -> 'a -> 'a) -> t -> 'a -> 'a + val find : string -> t -> Table.t + val iter : (string -> Table.t -> unit) -> t -> unit + val remove : string -> t -> t + val update : string -> Table.t -> (Table.t -> Table.t) -> t -> t + end + +module Manifest : + sig + type t + val empty : t + val make : int -> int -> Generation.t -> t + val generation : t -> Generation.t + val update_generation : (Generation.t -> Generation.t) -> t -> t + val next : t -> t + val schema : t -> int * int + val update_schema : ((int * int) option -> (int * int) option) -> t -> t + end + +(** The core database updates (PreDelete is more of an 'event') *) +type update = + | WriteField of string (* tblname *) * string (* objref *) * string (* fldname *) * string (* oldval *) * string (* newval *) + | PreDelete of string (* tblname *) * string (* objref *) + | Delete of string (* tblname *) * string (* objref *) * (string * string) list (* values *) + | Create of string (* tblname *) * string (* objref *) * (string * string) list (* values *) + +module Database : + sig + type t + val update_manifest : (Manifest.t -> Manifest.t) -> t -> t + val update_tableset : (TableSet.t -> TableSet.t) -> t -> t + val manifest : t -> Manifest.t + val tableset : t -> TableSet.t + val schema : t -> Schema.t + val increment : t -> t + val update : (TableSet.t -> TableSet.t) -> t -> t + val set_generation : Generation.t -> t -> t + val make : Schema.t -> t -val make_manifest : int -> int -> Int64.t -> db_dump_manifest -val manifest_of_cache: cache -> db_dump_manifest + val table_of_ref : string -> t -> string + val lookup_key : string -> t -> (string * string) option + val reindex : t -> t -val schema_of_cache: cache -> int * int -val set_schema_vsn: cache -> int * int -> unit + val register_callback : string -> (update -> t -> unit) -> t -> t + val unregister_callback : string -> t -> t + val notify : update -> t -> unit + end -val generation_of_cache: cache -> Generation.t -val set_generation: cache -> Generation.t -> unit -val increment: cache -> unit -val lookup_field_in_row : row -> string -> string -val lookup_table_in_cache : 
cache -> string -> table -val lookup_row_in_table : table -> string -> string -> row -val iter_over_rows : (string -> row -> unit) -> table -> unit -val iter_over_tables : (string -> table -> unit) -> cache -> unit -val iter_over_fields : (string -> string -> unit) -> row -> unit -val set_field_in_row : row -> string -> string -> unit -val set_row_in_table : table -> string -> row -> unit -val set_table_in_cache : cache -> string -> table -> unit -val create_empty_row : unit -> row -val create_empty_table : unit -> table -val create_empty_cache : unit -> cache -val fold_over_fields : (string -> string -> 'a -> 'a) -> row -> 'a -> 'a -val fold_over_rows : (string -> row -> 'a -> 'a) -> table -> 'a -> 'a -val fold_over_tables : (string -> table -> 'a -> 'a) -> cache -> 'a -> 'a +exception Duplicate +val add_to_set : string -> string -> string +val remove_from_set : string -> string -> string +val add_to_map : string -> string -> string -> string +val remove_from_map : string -> string -> string + +val set_table : string -> Table.t -> Database.t -> Database.t +val set_row_in_table : string -> string -> Row.t -> Database.t -> Database.t +val set_field_in_row : + string -> string -> string -> string -> Database.t -> Database.t +val remove_row_from_table : string -> string -> Database.t -> Database.t + +type where_record = { + table: string; (** table from which ... *) + return: string; (** we'd like to return this field... *) + where_field: string; (** where this other field... *) + where_value: string; (** contains this value *) +} +val where_record_of_rpc: Rpc.t -> where_record +val rpc_of_where_record: where_record -> Rpc.t + +type structured_op_t = + | AddSet + | RemoveSet + | AddMap + | RemoveMap +val structured_op_t_of_rpc: Rpc.t -> structured_op_t +val rpc_of_structured_op_t: structured_op_t -> Rpc.t -val get_rowlist : table -> row list -val get_reflist : table -> string list -val get_column : cache -> string -> string -> string list -val find_row : cache -> string -> string -> row -val remove_row_from_table : table -> string -> unit -val snapshot : cache -> cache diff --git a/ocaml/database/db_conn_store.ml b/ocaml/database/db_conn_store.ml index 3068037c..3d3827ee 100644 --- a/ocaml/database/db_conn_store.ml +++ b/ocaml/database/db_conn_store.ml @@ -14,8 +14,6 @@ (* ------------------- List of db connections that are active (read from db.conf file) *) let db_connections : Parse_db_conf.db_connection list ref = ref [] (* initalised by ocaml/xapi/xapi.ml *) -exception Cannot_populate_database (* should never be thrown *) -exception No_db_connections_available (* Locks for each db connection *) let db_conn_locks_m = Mutex.create() (* mutex used to protect access to table of mutexes -- nice! 
*) diff --git a/ocaml/database/db_connections.ml b/ocaml/database/db_connections.ml index 89e19f34..5dd27f39 100644 --- a/ocaml/database/db_connections.ml +++ b/ocaml/database/db_connections.ml @@ -16,22 +16,25 @@ module R = Debug.Debugger(struct let name = "redo_log" end) open D let get_dbs_and_gen_counts() = - List.map (fun conn->(Generation.read conn, conn)) (Db_conn_store.read_db_connections()) - -exception No_databases + List.map (fun conn->(Parse_db_conf.generation_read conn, conn)) (Db_conn_store.read_db_connections()) +(** Returns true if the supplied connection actually exists, false otherwise *) +let exists connection = + Sys.file_exists (Parse_db_conf.generation_filename connection) + && (Sys.file_exists connection.Parse_db_conf.path) + (* This returns the most recent of the db connections to populate from. It also initialises the in-memory generation count to the largest of the db connections' generation counts *) -let pick_most_recent_db = function -| [] -> raise No_databases +let choose connections = match List.filter exists connections with +| [] -> None | (c :: cs) as connections -> - List.iter (fun c -> debug "Dbconf contains: %s (generation %Ld)" c.Parse_db_conf.path (Generation.read c)) connections; + List.iter (fun c -> debug "Dbconf contains: %s (generation %Ld)" c.Parse_db_conf.path (Parse_db_conf.generation_read c)) connections; let gen, most_recent = List.fold_left (fun (g, c) c' -> - let g' = Generation.read c' in + let g' = Parse_db_conf.generation_read c' in if g' > g then (g', c') else (g, c)) - (Generation.read c, c) cs in + (Parse_db_conf.generation_read c, c) cs in debug "Most recent db is %s (generation %Ld)" most_recent.Parse_db_conf.path gen; - most_recent + Some most_recent let preferred_write_db () = List.hd (Db_conn_store.read_db_connections()) (* !!! FIX ME *) @@ -95,25 +98,13 @@ let flush_dirty_and_maybe_exit dbconn exit_spec = end; was_anything_flushed ) -(* -let create_empty_db (major, minor) dbconn = - Generation.create_fresh dbconn; - Backend_xml.create_empty_db (major, minor) dbconn - *) -let maybe_create_new_db (major,minor) dbconn = - if not (Sys.file_exists dbconn.Parse_db_conf.path) - then Backend_xml.create_empty_db (major,minor) dbconn -let force_flush_all dbconn = +let flush dbconn db = debug "About to flush database: %s" dbconn.Parse_db_conf.path; Db_conn_store.with_db_conn_lock dbconn (fun () -> - Backend_xml.force_flush_all dbconn None + Backend_xml.flush dbconn db ) -let force_flush_specified_cache dbconn cache = - Db_conn_store.with_db_conn_lock dbconn - (fun () -> - Backend_xml.force_flush_all dbconn (Some cache) - ) + diff --git a/ocaml/database/db_dirty.ml b/ocaml/database/db_dirty.ml deleted file mode 100644 index 9fdcbbc8..00000000 --- a/ocaml/database/db_dirty.ml +++ /dev/null @@ -1,84 +0,0 @@ -(* - * Copyright (C) 2006-2009 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. 
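(* Aside: Db_connections.choose above first filters out connections whose
   files no longer exist, then keeps the connection with the highest
   on-disk generation count. The selection step, sketched over
   (path, generation) pairs; the real code obtains the generation via
   Parse_db_conf.generation_read: *)
let choose (conns : (string * int64) list) =
  match conns with
  | [] -> None
  | c :: cs ->
    Some
      (List.fold_left
         (fun (p, g) (p', g') -> if g' > g then (p', g') else (p, g))
         c cs)
let () = assert (choose [ ("a", 3L); ("b", 7L); ("c", 5L) ] = Some ("b", 7L))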
- *) - -(* ------------------- Keep a per dbconn cache of which rows/tables are dirty *) - -(* Keep track of what's dirty in the in-memory cache: *) - -(* Note that "Clean" is only used as a return value when nothing is in the dirty_rows entry for - a particular db connection. We never store an explicit "Clean" entry in the dirty_rows table; - instead the absence of an entry for a given object reference tells us that the row is clean *) -type status = New | Modified | Clean -type dirty_record = {dirty_tables : (string,unit) Hashtbl.t; - dirty_rows : (string,status) Hashtbl.t} -let dirty_records : (Parse_db_conf.db_connection, dirty_record) Hashtbl.t = Hashtbl.create 20 -let make_new_dirty_record() = {dirty_tables = Hashtbl.create 20; dirty_rows = Hashtbl.create 20} - -let foreach_db_connection f = - let dirty_records = List.map (fun dbconn -> Hashtbl.find dirty_records dbconn) (Db_conn_store.read_db_connections()) in - List.iter f dirty_records - -let for_my_db_connection dbconn f = - let my_record = Hashtbl.find dirty_records dbconn in - f my_record - -let make_blank_dirty_records() = - List.iter (fun dbconn -> Hashtbl.replace dirty_records dbconn (make_new_dirty_record())) (Db_conn_store.read_db_connections()) - -(* common fns that are called from fns below *) - -(* When we set row dirty status then "New" must always take precendence over "Modified" *) -let set_row_dirty_status objref status dr = - let doset() = Hashtbl.replace dr.dirty_rows objref status in - match status with - | Modified -> - begin - try - let current_state = Hashtbl.find dr.dirty_rows objref in - if current_state<>New then doset() (* do not override new with modified *) - with _ -> doset() (* not found *) - end - | _ -> doset() - -let clear_row_dirty_status objref dr = Hashtbl.remove dr.dirty_rows objref -let set_dirty_table_status tblname dr = Hashtbl.replace dr.dirty_tables tblname () -let clear_dirty_table_status tblname dr = Hashtbl.remove dr.dirty_tables tblname - -let clear_my_row_dirty_status dbconn objref = - for_my_db_connection dbconn (clear_row_dirty_status objref) -let clear_my_dirty_table_status dbconn tblname = - for_my_db_connection dbconn (clear_dirty_table_status tblname) - -(* Functions to manipulate dirty status (below) must always be called from a context in which the - database is locked. *) -(* "New" status must always take precendence over "Modified" status *) -let set_all_row_dirty_status objref status = - foreach_db_connection (set_row_dirty_status objref status) - -let clear_all_row_dirty_status objref = - foreach_db_connection (clear_row_dirty_status objref) - -let set_all_dirty_table_status tblname = - foreach_db_connection (set_dirty_table_status tblname) - -let clear_all_dirty_table_status tblname = - foreach_db_connection (clear_dirty_table_status tblname) - -let read_my_dirty_table_status dbconn tblname = - for_my_db_connection dbconn (fun dr->Hashtbl.mem dr.dirty_tables tblname) - -let read_my_row_dirty_status dbconn objref = - try - for_my_db_connection dbconn (fun dr->Hashtbl.find dr.dirty_rows objref) - with Not_found -> Clean diff --git a/ocaml/database/db_interface.ml b/ocaml/database/db_interface.ml index 8188b00e..b7f0f749 100644 --- a/ocaml/database/db_interface.ml +++ b/ocaml/database/db_interface.ml @@ -60,7 +60,7 @@ module type DB_ACCESS = sig val db_get_by_name_label : string -> string -> string list (** [read_set_ref {tbl,return,where_field,where_value}] is identical - to [read_field_where ...] except it builds and consults an index *) + to [read_field_where ...]. 
*) val read_set_ref : Db_cache_types.where_record -> string list (** [create_row tbl kvpairs ref] create a new row in [tbl] with @@ -96,3 +96,5 @@ module type DB_ACCESS = sig string * string -> string -> string -> string -> Db_cache_types.structured_op_t -> unit end + + diff --git a/ocaml/database/db_upgrade.ml b/ocaml/database/db_upgrade.ml index e86dad74..fc0c0778 100644 --- a/ocaml/database/db_upgrade.ml +++ b/ocaml/database/db_upgrade.ml @@ -17,6 +17,7 @@ open D open Db_cache_types open Stringext +open Pervasiveext open Vm_memory_constraints.Vm_memory_constraints (* ---------------------- upgrade db file from last release schema -> current schema. @@ -41,21 +42,22 @@ module Names = Db_names type upgrade_rule = { description: string; version: int * int; (** rule will be applied if the schema version is <= this number *) - fn: unit -> unit; + fn: Database.t -> Database.t; } (** Apply all the rules needed for the previous_version *) -let apply_upgrade_rules rules previous_version = +let apply_upgrade_rules rules previous_version db = debug "Looking for database upgrade rules:"; let required_rules = List.filter (fun r -> previous_version <= r.version) rules in - List.iter - (fun r -> - debug "Applying database upgrade rule: %s" r.description; - try - r.fn () - with exn -> - error "Database upgrade rule '%s' failed: %s" r.description (Printexc.to_string exn) - ) required_rules + List.fold_left + (fun db r -> + debug "Applying database upgrade rule: %s" r.description; + try + r.fn db + with exn -> + error "Database upgrade rule '%s' failed: %s" r.description (Printexc.to_string exn); + db + ) db required_rules let (+++) = Int64.add @@ -77,101 +79,52 @@ properties to safe defaults to avoid triggering something bad. {- t.dynamic_min := s.target}}} } *) -let upgrade_vm_records () = +let upgrade_vm_records db : Database.t = debug "Upgrading VM.memory_dynamic_{min,max} in guest and control domains."; - let vm_table = lookup_table_in_cache Db_backend.cache Names.vm in - let vm_rows = get_rowlist vm_table in - (* Upgrade the memory constraints of each virtual machine. *) - List.iter - (fun vm_row -> - (* Helper functions to access the database. *) - let get field_name = Int64.of_string - (lookup_field_in_row vm_row field_name) in - let set field_name value = set_field_in_row - vm_row field_name (Int64.to_string value) in - if (lookup_field_in_row vm_row Names.is_control_domain = "true") - then begin - let target = get Names.memory_target in - set Names.memory_dynamic_min target; - set Names.memory_dynamic_max target; - debug "VM %s (%s) dynamic_{min,max} <- %Ld" - (lookup_field_in_row vm_row Names.uuid) - (lookup_field_in_row vm_row Names.name_label) - target; - end else begin - (* Note this will also transform templates *) - let safe_constraints = reset_to_safe_defaults ~constraints: - { static_min = get Names.memory_static_min - ; dynamic_min = get Names.memory_dynamic_min - ; target = get Names.memory_target - ; dynamic_max = get Names.memory_dynamic_max - ; static_max = get Names.memory_static_max - } in - set Names.memory_static_min (safe_constraints.static_min ); - set Names.memory_dynamic_min (safe_constraints.dynamic_min); - set Names.memory_target (safe_constraints.target ); - set Names.memory_dynamic_max (safe_constraints.dynamic_max); - set Names.memory_static_max (safe_constraints.static_max ); + let ts = Database.tableset db in + let vm_table = TableSet.find Names.vm ts in + + let update_row vm_row = + (* Helper functions to access the database. 
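With fn now a pure Database.t -> Database.t function, apply_upgrade_rules becomes a fold in which a failing rule is logged and skipped, leaving the database exactly as the previous rule left it rather than half-mutated. A minimal sketch of that contract (the 'db type parameter stands in for Database.t):

    type 'db rule = { description : string; fn : 'db -> 'db }

    (* Apply rules in order; a raising rule passes the database
       through unchanged instead of aborting the whole upgrade. *)
    let apply_rules rules db =
      List.fold_left
        (fun db r ->
           try r.fn db
           with e ->
             Printf.eprintf "Rule '%s' failed: %s\n"
               r.description (Printexc.to_string e);
             db)
        db rules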
*) + let get field_name = Int64.of_string + (Row.find field_name vm_row) in + let set field_name value vm_row = Row.add + field_name (Int64.to_string value) vm_row in + if Row.find Names.is_control_domain vm_row = "true" then begin + let target = get Names.memory_target in + debug "VM %s (%s) dynamic_{min,max} <- %Ld" + (Row.find Names.uuid vm_row) + (Row.find Names.name_label vm_row) + target; + ((set Names.memory_dynamic_min target) + ++ (set Names.memory_dynamic_max target)) + vm_row + end else begin + (* Note this will also transform templates *) + let safe_constraints = reset_to_safe_defaults ~constraints: + { static_min = get Names.memory_static_min + ; dynamic_min = get Names.memory_dynamic_min + ; target = get Names.memory_target + ; dynamic_max = get Names.memory_dynamic_max + ; static_max = get Names.memory_static_max + } in debug "VM %s (%s) dynamic_{min,max},target <- %Ld" - (lookup_field_in_row vm_row Names.uuid) - (lookup_field_in_row vm_row Names.name_label) + (Row.find Names.uuid vm_row) + (Row.find Names.name_label vm_row) safe_constraints.static_max; - end; - ) - vm_rows - -(* -let update_templates () = - let vm_table = lookup_table_in_cache Db_backend.cache Names.vm in - let vm_rows = get_rowlist vm_table in - (* Upgrade the memory constraints of each virtual machine. *) - List.iter (fun vm_row -> - (* CA-18974: We accidentally shipped Miami creating duplicate keys in template other-config; need to strip these out across - upgrade *) - let other_config = lookup_field_in_row vm_row Names.other_config in - let other_config_kvs = String_unmarshall_helper.map (fun x->x) (fun x->x) other_config in - (* so it turns out that it was actually the (k,v) pair as a whole that was duplicated, - so we can just call setify on the whole key,value pair list directly; - we don't have to worry about setifying the keys separately *) - let dups_removed = Listext.List.setify other_config_kvs in - (* marshall again and write back to dbrow *) - let dups_removed = String_marshall_helper.map (fun x->x) (fun x->x) dups_removed in - set_field_in_row vm_row Names.other_config dups_removed; + ((set Names.memory_static_min (safe_constraints.static_min )) + ++ (set Names.memory_dynamic_min (safe_constraints.dynamic_min)) + ++ (set Names.memory_target (safe_constraints.target)) + ++ (set Names.memory_dynamic_max (safe_constraints.dynamic_max)) + ++ (set Names.memory_static_max (safe_constraints.static_max ))) + vm_row + end in + let vm_table = Table.fold (fun r row acc -> Table.add r (update_row row) acc) vm_table Table.empty in + set_table Names.vm vm_table db - if bool_of_string (lookup_field_in_row vm_row Names.is_a_template) && - (List.mem_assoc Xapi_globs.default_template_key other_config_kvs) then - let default_template_key_val = List.assoc Xapi_globs.default_template_key other_config_kvs in - if default_template_key_val="true" then - begin - (* CA-18035: Add viridian flag to built-in templates (_not custom ones_) across upgrade *) - let platform = lookup_field_in_row vm_row Names.platform in - let platform_kvs = String_unmarshall_helper.map (fun x->x) (fun x->x) platform in - let platform_kvs = - if not (List.mem_assoc Xapi_globs.viridian_key_name platform_kvs) then - (Xapi_globs.viridian_key_name,Xapi_globs.default_viridian_key_value)::platform_kvs else platform_kvs in - let platform_kvs = String_marshall_helper.map (fun x->x) (fun x->x) platform_kvs in - set_field_in_row vm_row Names.platform platform_kvs; - - (* CA-19924 If template name is "Red Hat Enterprise Linux 5.2" || "Red Hat Enterprise 
Linux 5.2 x64" then we need to ensure that - we have ("machine-address-size", "36") in other_config. This is because the RHEL5.2 template changed between beta1 and beta2 - and we need to make sure it's the same after upgrade.. - *) - let template_name_label = lookup_field_in_row vm_row Names.name_label in - let other_config = lookup_field_in_row vm_row Names.other_config in - let other_config = String_unmarshall_helper.map (fun x->x) (fun x->x) other_config in - let other_config = - if (template_name_label="Red Hat Enterprise Linux 5.2" || template_name_label="Red Hat Enterprise Linux 5.2 x64") - && (not (List.mem_assoc Xapi_globs.machine_address_size_key_name other_config)) then - (Xapi_globs.machine_address_size_key_name, Xapi_globs.machine_address_size_key_value)::other_config else other_config in - let other_config = String_marshall_helper.map (fun x->x) (fun x->x) other_config in - set_field_in_row vm_row Names.other_config other_config - - end - ) vm_rows -*) (* GEORGE OEM -> BODIE/MNR *) -let upgrade_bios_strings () = +let upgrade_bios_strings db = let oem_manufacturer = try let ic = open_in "/var/tmp/.previousInventory" in @@ -186,49 +139,52 @@ let upgrade_bios_strings () = with _ -> None in let update_vms bios_strings = - let vm_table = lookup_table_in_cache Db_backend.cache Names.vm in - let vm_rows = get_rowlist vm_table in + let ts = Database.tableset db in + let vm_table = TableSet.find Names.vm ts in let bios_strings_kvs = String_marshall_helper.map (fun x->x) (fun x->x) bios_strings in - let update vm_row = - set_field_in_row vm_row Names.bios_strings bios_strings_kvs - in - List.iter update vm_rows + let update_row row = + Row.add Names.bios_strings bios_strings_kvs row in + let vm_table = Table.fold (fun r row tbl -> Table.add r (update_row row) tbl) vm_table Table.empty in + set_table Names.vm vm_table in match oem_manufacturer with | Some oem -> info "Upgrade from OEM edition (%s)." 
oem; if String.has_substr oem "HP" then begin debug "Using old HP BIOS strings"; - update_vms Xapi_globs.old_hp_bios_strings + update_vms Xapi_globs.old_hp_bios_strings db end else if String.has_substr oem "Dell" then begin debug "Using old Dell BIOS strings"; - update_vms Xapi_globs.old_dell_bios_strings - end + update_vms Xapi_globs.old_dell_bios_strings db + end else db | None -> info "Upgrade from retail edition."; debug "Using generic BIOS strings"; - update_vms Xapi_globs.generic_bios_strings + update_vms Xapi_globs.generic_bios_strings db -let update_snapshots () = +let update_snapshots db = (* GEORGE -> MIDNIGHT RIDE *) - let vm_table = lookup_table_in_cache Db_backend.cache Names.vm in - let vm_rows = get_rowlist vm_table in - let update_snapshots vm_row = - let vm = lookup_field_in_row vm_row Names.ref in - let snapshot_rows = List.filter (fun s -> lookup_field_in_row s Names.snapshot_of = vm) vm_rows in + let ts = Database.tableset db in + let vm_table = TableSet.find Names.vm ts in + let vm_rows = Table.rows vm_table in + let update_snapshots vm_row vm_table : Table.t = + let vm = Row.find Names.ref vm_row in + let snapshot_rows = List.filter (fun s -> Row.find Names.snapshot_of s = vm) vm_rows in let compare s1 s2 = - let t1 = lookup_field_in_row s1 Names.snapshot_time in - let t2 = lookup_field_in_row s2 Names.snapshot_time in + let t1 = Row.find Names.snapshot_time s1 in + let t2 = Row.find Names.snapshot_time s2 in compare t1 t2 in let ordered_snapshot_rows = List.sort compare snapshot_rows in - debug "Snapshots(%s) = {%s}" vm (String.concat ", " (List.map (fun s -> lookup_field_in_row s Names.ref) ordered_snapshot_rows)); - let rec aux = function - | [] | [_] -> () + debug "Snapshots(%s) = {%s}" vm (String.concat ", " (List.map (fun s -> Row.find Names.ref s) ordered_snapshot_rows)); + let rec aux snaps vm_table = match snaps with + | [] | [_] -> vm_table | s1 :: s2 :: t -> - set_field_in_row s2 Names.parent (lookup_field_in_row s1 Names.ref); - aux (s2 :: t) in - aux (ordered_snapshot_rows @ [ vm_row]) in - List.iter update_snapshots vm_rows + let row' = Row.add Names.parent (Row.find Names.ref s1) s2 in + let vm_table = Table.add (Row.find Names.ref s2) row' vm_table in + aux (row' :: t) vm_table in + aux (ordered_snapshot_rows @ [ vm_row]) vm_table in + let vm_table = Table.fold (fun _ vm_row tbl -> update_snapshots vm_row tbl) vm_table vm_table in + set_table Names.vm vm_table db (** A list of all the custom database upgrade rules known to the system. 
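The rewritten update_snapshots sorts a VM's snapshots by snapshot_time, appends the VM itself, and points each element's parent at its predecessor. The linking step reduces to a pure function over references; a sketch (not the patch's code):

    (* parent_links [ "snapA"; "snapB"; "vm" ]
       = [ ("snapB", "snapA"); ("vm", "snapB") ] *)
    let parent_links (ordered : string list) : (string * string) list =
      let rec aux = function
        | [] | [ _ ] -> []
        | s1 :: s2 :: t -> (s2, s1) :: aux (s2 :: t) in
      aux ordered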
*) let upgrade_rules = @@ -246,47 +202,48 @@ (** {Generic database upgrade handling} *) (** Automatically insert blank tables and new columns with default values *) -let generic_database_upgrade () = - let existing_table_names = fold_over_tables (fun name _ acc -> name :: acc) Db_backend.cache [] in - let api_table_names = List.map (fun x -> Escaping.escape_obj x.Datamodel_types.name) Db_backend.api_objs in - let created_table_names = Listext.List.set_difference api_table_names existing_table_names in - let deleted_table_names = Listext.List.set_difference existing_table_names api_table_names in - List.iter (fun tblname -> - debug "Adding new database table: '%s'" tblname; - let newtbl = create_empty_table () in - set_table_in_cache Db_backend.cache tblname newtbl) created_table_names; - List.iter (fun tblname -> - debug "Ignoring legacy database table: '%s'" tblname - ) deleted_table_names; +let generic_database_upgrade db = + let existing_table_names = TableSet.fold (fun name _ acc -> name :: acc) (Database.tableset db) [] in + let schema_table_names = Schema.table_names (Database.schema db) in + let created_table_names = Listext.List.set_difference schema_table_names existing_table_names in + let deleted_table_names = Listext.List.set_difference existing_table_names schema_table_names in + let db = Database.update + (fun ts -> + List.fold_left (fun ts tblname -> + debug "Adding new database table: '%s'" tblname; + TableSet.add tblname Table.empty ts) ts created_table_names) db in (* for each table, go through and fill in missing default values *) - List.iter - (fun tblname -> - let tbl = lookup_table_in_cache Db_backend.cache tblname in - let rows = get_rowlist tbl in - let add_fields_to_row objref r = - let kvs = fold_over_fields (fun k v env -> (k,v)::env) r [] in - let new_kvs = Db_backend.add_default_kvs kvs tblname in - (* now blank r and fill it with new kvs: *) - let newrow = create_empty_row () in - List.iter (fun (k,v) -> set_field_in_row newrow k v) new_kvs; - set_row_in_table tbl objref newrow - in - iter_over_rows add_fields_to_row tbl) - api_table_names + List.fold_left + (fun db tblname -> + let tbl = TableSet.find tblname (Database.tableset db) in + let schema = Schema.table tblname (Database.schema db) in + let add_fields_to_row objref r acc : Table.t = + Table.add objref (Row.add_defaults schema r) acc in + set_table tblname (Table.fold add_fields_to_row tbl Table.empty) db + ) db schema_table_names (* Maybe upgrade most recent db *) -let maybe_upgrade most_recent_db = - let (previous_major_vsn, previous_minor_vsn) as previous_vsn = Backend_xml.read_schema_vsn most_recent_db in +let maybe_upgrade db = + let (previous_major_vsn, previous_minor_vsn) as previous_vsn = Manifest.schema (Database.manifest db) in let (latest_major_vsn, latest_minor_vsn) as latest_vsn = Datamodel.schema_major_vsn, Datamodel.schema_minor_vsn in let previous_string = Printf.sprintf "(%d, %d)" previous_major_vsn previous_minor_vsn in let latest_string = Printf.sprintf "(%d, %d)" latest_major_vsn latest_minor_vsn in debug "Database schema version is %s; binary schema version is %s" previous_string latest_string; - if previous_vsn > latest_vsn - then warn "Database schema version %s is more recent than binary %s: downgrade is unsupported." 
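generic_database_upgrade now adds missing tables and missing columns functionally, with Row.add_defaults consulting the schema for each absent field. The default-filling idea in isolation, over a map-based row (add_defaults here is a hypothetical stand-in for the patch's helper):

    module StringMap = Map.Make(String)

    (* Keep existing values; only insert a default where the field is absent. *)
    let add_defaults (defaults : (string * string) list) (row : string StringMap.t) =
      List.fold_left
        (fun row (field, d) ->
           if StringMap.mem field row then row else StringMap.add field d row)
        row defaults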
previous_string previous_string - else - if previous_vsn < latest_vsn then begin - apply_upgrade_rules upgrade_rules previous_vsn; - debug "Upgrade rules applied, bumping schema version to %d.%d" latest_major_vsn latest_minor_vsn; - Db_cache_types.set_schema_vsn Db_backend.cache latest_vsn - end else debug "Database schemas match, no upgrade required" + if previous_vsn > latest_vsn then begin + warn "Database schema version %s is more recent than binary %s: downgrade is unsupported." previous_string latest_string; + db + end else begin + if previous_vsn < latest_vsn then begin + let db = apply_upgrade_rules upgrade_rules previous_vsn db in + debug "Upgrade rules applied, bumping schema version to %d.%d" latest_major_vsn latest_minor_vsn; + (Database.update_manifest ++ Manifest.update_schema) + (fun _ -> Some (latest_major_vsn, latest_minor_vsn)) db + end else begin + debug "Database schemas match, no upgrade required"; + db + end + end diff --git a/ocaml/database/db_xml.ml b/ocaml/database/db_xml.ml index 5b61aebe..e564fa71 100644 --- a/ocaml/database/db_xml.ml +++ b/ocaml/database/db_xml.ml @@ -1,5 +1,5 @@ (* - * Copyright (C) 2006-2009 Citrix Systems Inc. + * Copyright (C) 2006-2010 Citrix Systems Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -40,9 +40,9 @@ module To = struct let int64 (output: Xmlm.output) (key: string) (x: Int64.t) = pair output key (Int64.to_string x) (* Marshal a whole database table to an Xmlm output abstraction *) - let table (output: Xmlm.output) name (tbl: table) = - let record rf (row: row) = - let (tag: Xmlm.tag) = make_tag "row" (("ref", rf) :: (fold_over_fields (fun k v acc -> (k, Xml_spaces.protect v) :: acc) row [])) in + let table schema (output: Xmlm.output) name (tbl: Table.t) = + let record rf (row: Row.t) = + let (tag: Xmlm.tag) = make_tag "row" (("ref", rf) :: (Row.fold (fun k v acc -> (k, Xml_spaces.protect v) :: acc) row [])) in Xmlm.output output (`El_start tag); Xmlm.output output `El_end in let tag = make_tag "table" [ "name", name ] in @@ -50,99 +50,101 @@ module To = struct (* we write a table entry whether or not the table persists, because populate happens to assume that all tables will be present. 
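maybe_upgrade's test relies on OCaml's structural ordering of (major, minor) pairs: majors are compared first, minors only break ties. A quick check of that assumption:

    let () =
      assert ((1, 9) < (2, 0));      (* older major loses, whatever the minor *)
      assert ((2, 0) < (2, 1));      (* same major: the minor decides *)
      assert (not ((2, 1) < (2, 1)))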
However, if the table is marked as "don't persist" then we don't write any row entries: *) - if Db_backend.this_table_persists name then - iter_over_rows record tbl; + if Schema.is_table_persistent schema name + then Table.iter record tbl; Xmlm.output output `El_end (* Write out a manifest *) - let manifest (output: Xmlm.output) (manifest: db_dump_manifest) : unit = - Xmlm.output output (`El_start (make_tag "manifest" [])); - int output _schema_major_vsn manifest.schema_major_vsn; - int output _schema_minor_vsn manifest.schema_minor_vsn; - int64 output _generation_count manifest.generation_count; - Xmlm.output output `El_end - - (* Write out a full database cache dump *) - let cache (output: Xmlm.output) (m, cache) : unit = + let manifest (output: Xmlm.output) (manifest: Manifest.t) : unit = + Xmlm.output output (`El_start (make_tag "manifest" [])); + let major, minor = Manifest.schema manifest in + int output _schema_major_vsn major; + int output _schema_minor_vsn minor; + int64 output _generation_count (Manifest.generation manifest); + Xmlm.output output `El_end + + (* Write out a full database *) + let database (output: Xmlm.output) db : unit = Xmlm.output output (`Dtd None); Xmlm.output output (`El_start (make_tag "database" [])); - manifest output m; - iter_over_tables (table output) cache; + manifest output (Database.manifest db); + TableSet.iter (table (Database.schema db) output) (Database.tableset db); Xmlm.output output `El_end - let fd (fd: Unix.file_descr) (m, c) : unit = + let fd (fd: Unix.file_descr) db : unit = let oc = Unix.out_channel_of_descr fd in - cache (Xmlm.make_output (`Channel oc)) (m, c); + database (Xmlm.make_output (`Channel oc)) db; flush oc - let file (filename: string) (m, c) : unit = + let file (filename: string) db : unit = let fdescr = Unix.openfile filename [ Unix.O_WRONLY; Unix.O_CREAT; Unix.O_TRUNC ] 0o600 in finally - (fun () -> fd fdescr (m, c)) + (fun () -> fd fdescr db) (fun () -> Unix.close fdescr) end module From = struct - let cache (input: Xmlm.input) = - let tags = Stack.create () in - let maybe_return f accu = - if Xmlm.eoi input then begin - if Stack.is_empty tags then - accu - else - raise (Unmarshall_error "Unexpected end of file") - end else - f accu in - let rec f ((cache, table, manifest) as acc) = match Xmlm.input input with - (* On reading a start tag... *) - | `El_start (tag: Xmlm.tag) -> - Stack.push tag tags; - begin match tag with - | (_, ("database" | "manifest")), _ -> f acc - | (_, "table"), [ (_, "name"), _ ] -> - f (cache, create_empty_table (), manifest) - | (_, "row"), ((_, "ref"), rf) :: rest -> - let row = create_empty_row () in - List.iter (fun (("", k), v) -> set_field_in_row row k (Xml_spaces.unprotect v)) rest; - set_row_in_table table rf row; - f acc - | (_, "pair"), [ (_, "key"), k; (_, "value"), v ] -> - f (cache, table, (k, v) :: manifest) - | (_, name), _ -> raise (Unmarshall_error (Printf.sprintf "Unexpected tag: %s" name)) - end - (* On reading an end tag... 
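For reference, the element-per-row encoding that To.table emits is a handful of Xmlm calls; this sketch (assuming only the xmlm library, with made-up table and field names) writes one table holding one row:

    let write_example oc =
      let output = Xmlm.make_output (`Channel oc) in
      let tag name attrs = (("", name), List.map (fun (k, v) -> (("", k), v)) attrs) in
      Xmlm.output output (`Dtd None);
      Xmlm.output output (`El_start (tag "database" []));
      Xmlm.output output (`El_start (tag "table" [ "name", "VM" ]));
      (* rows are empty elements whose attributes carry the field values *)
      Xmlm.output output (`El_start (tag "row" [ "ref", "OpaqueRef:demo"; "uuid", "0123" ]));
      Xmlm.output output `El_end;
      Xmlm.output output `El_end;
      Xmlm.output output `El_end

    let () = write_example stdout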
*) - | `El_end -> - let tag = Stack.pop tags in - begin match tag with - | (_, ("database" | "manifest" | "row" | "pair")), _ -> maybe_return f acc - | (_, "table"), [ (_, "name"), name ] -> - set_table_in_cache cache name table; - maybe_return f acc - | (_, name), _ -> raise (Unmarshall_error (Printf.sprintf "Unexpected tag: %s" name)) - end - | _ -> f acc - in - let (cache, _, manifest) = f (create_empty_cache (), create_empty_table (), []) in - let generation_count = Int64.of_string (List.assoc _generation_count manifest) in - Db_cache_types.set_generation cache generation_count; - (* Manifest is actually a record *) - let manifest = { - schema_major_vsn = int_of_string (List.assoc _schema_major_vsn manifest); - schema_minor_vsn = int_of_string (List.assoc _schema_minor_vsn manifest); - generation_count = generation_count - } in - - manifest, cache - - let file xml_filename = - let input = open_in xml_filename in - finally - (fun () -> cache (Xmlm.make_input (`Channel input))) - (fun () -> close_in input) - - let channel inchan = - cache (Xmlm.make_input (`Channel inchan)) + let database schema (input: Xmlm.input) = + let tags = Stack.create () in + let maybe_return f accu = + if Xmlm.eoi input then begin + if Stack.is_empty tags then + accu + else + raise (Unmarshall_error "Unexpected end of file") + end else + f accu in + let rec f ((tableset, table, manifest) as acc) = match Xmlm.input input with + (* On reading a start tag... *) + | `El_start (tag: Xmlm.tag) -> + Stack.push tag tags; + begin match tag with + | (_, ("database" | "manifest")), _ -> f acc + | (_, "table"), [ (_, "name"), _ ] -> + f (tableset, Table.empty, manifest) + | (_, "row"), ((_, "ref"), rf) :: rest -> + (* Remove any other duplicate "ref"s which might have sneaked in there *) + let rest = List.filter (fun ((_, k), _) -> k <> "ref") rest in + let row = List.fold_left (fun row ((_, k), v) -> + Row.add k (Xml_spaces.unprotect v) row + ) Row.empty rest in + f (tableset, Table.add rf row table, manifest) + | (_, "pair"), [ (_, "key"), k; (_, "value"), v ] -> + f (tableset, table, (k, v) :: manifest) + | (_, name), _ -> + raise (Unmarshall_error (Printf.sprintf "Unexpected tag: %s" name)) + end (* On reading an end tag... 
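From.database keeps the old parser's streaming shape -- a recursive function over Xmlm signals threading an accumulator -- but now folds up immutable Table/TableSet values instead of mutating a cache. The skeleton of that style, cut down to counting row elements (a sketch, not the patch's parser):

    let count_rows ic =
      let input = Xmlm.make_input (`Channel ic) in
      let rec go n =
        if Xmlm.eoi input then n
        else match Xmlm.input input with
          | `El_start ((_, "row"), _) -> go (n + 1)   (* a <row .../> starts *)
          | _ -> go n in
      go 0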
*) + | `El_end -> + let tag = Stack.pop tags in + begin match tag with + | (_, ("database" | "manifest" | "row" | "pair")), _ -> maybe_return f acc + | (_, "table"), [ (_, "name"), name ] -> + maybe_return f (TableSet.add name table tableset, Table.empty, manifest) + | (_, name), _ -> + raise (Unmarshall_error (Printf.sprintf "Unexpected tag: %s" name)) + end + | _ -> f acc + in + let (ts, _, manifest) = f (TableSet.empty, Table.empty, []) in + let g = Int64.of_string (List.assoc _generation_count manifest) in + let major_vsn = int_of_string (List.assoc _schema_major_vsn manifest) in + let minor_vsn = int_of_string (List.assoc _schema_minor_vsn manifest) in + let manifest = Manifest.make major_vsn minor_vsn g in + ((Database.update_manifest (fun _ -> manifest)) + ++ (Database.update_tableset (fun _ -> ts))) + (Database.make schema) + + + let file schema xml_filename = + let input = open_in xml_filename in + finally + (fun () -> database schema (Xmlm.make_input (`Channel input))) + (fun () -> close_in input) + + let channel schema inchan = + database schema (Xmlm.make_input (`Channel inchan)) end diff --git a/ocaml/database/escaping.ml b/ocaml/database/escaping.ml index 01fe187b..f6663bee 100644 --- a/ocaml/database/escaping.ml +++ b/ocaml/database/escaping.ml @@ -11,7 +11,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -let reference = "_ref" (** Take a field name as a list (including namespaces) and return a flat name *) let escape_id x = String.concat "__" x diff --git a/ocaml/database/eventgen.ml b/ocaml/database/eventgen.ml index 76764c60..e8b64395 100644 --- a/ocaml/database/eventgen.ml +++ b/ocaml/database/eventgen.ml @@ -1,5 +1,5 @@ (* - * Copyright (C) 2006-2009 Citrix Systems Inc. + * Copyright (C) 2006-2010 Citrix Systems Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -68,3 +68,104 @@ let events_of_other_tbl_refs other_tbl_refs = (* Probably means the reference was dangling *) warn "skipping event for dangling reference %s: %s" tbl fld; []) other_tbl_refs) + +open Db_cache_types +open Db_action_helper + +let database_callback event db = + let context = Context.make "eventgen" in + + let other_tbl_refs tblname = follow_references tblname in + let other_tbl_refs_for_this_field tblname fldname = + List.filter (fun (_,fld) -> fld=fldname) (other_tbl_refs tblname) in + + match event with + | WriteField (tblname, objref, fldname, oldval, newval) -> + let events_old_val = + if Db_cache_impl.is_valid_ref oldval then + events_of_other_tbl_refs + (List.map (fun (tbl,fld) -> + (tbl, oldval, find_get_record tbl ~__context:context ~self:oldval)) (other_tbl_refs_for_this_field tblname fldname)) + else [] in + let events_new_val = + if Db_cache_impl.is_valid_ref newval then + events_of_other_tbl_refs + (List.map (fun (tbl,fld) -> + (tbl, newval, find_get_record tbl ~__context:context ~self:newval)) (other_tbl_refs_for_this_field tblname fldname)) + else [] + in + (* Generate event *) + let snapshot = find_get_record tblname ~__context:context ~self:objref in + let record = snapshot() in + List.iter (function + | tbl, ref, None -> + error "Failed to send MOD event for %s %s" tbl ref; + Printf.printf "Failed to send MOD event for %s %s\n%!" 
tbl ref; + | tbl, ref, Some s -> + events_notify ~snapshot:s tbl "mod" ref + ) events_old_val; + begin match record with + | None -> + error "Failed to send MOD event for %s %s" tblname objref; + Printf.printf "Failed to send MOD event for %s %s\n%!" tblname objref; + | Some record -> + events_notify ~snapshot:record tblname "mod" objref; + end; + List.iter (function + | tbl, ref, None -> + error "Failed to send MOD event for %s %s" tbl ref; + Printf.printf "Failed to send MOD event for %s %s\n%!" tbl ref; + | tbl, ref, Some s -> + events_notify ~snapshot:s tbl "mod" ref + ) events_new_val; + | PreDelete(tblname, objref) -> + begin match find_get_record tblname ~__context:context ~self:objref () with + | None -> + error "Failed to generate DEL event for %s %s" tblname objref; + (* Printf.printf "Failed to generate DEL event for %s %s\n%!" tblname objref; *) + | Some snapshot -> + events_notify ~snapshot tblname "del" objref + end + | Delete(tblname, objref, kv) -> + let other_tbl_refs = follow_references tblname in + let other_tbl_refs = + List.fold_left (fun accu (remote_tbl,fld) -> + let fld_value = List.assoc fld kv in + if Db_cache_impl.is_valid_ref fld_value + then (remote_tbl, fld_value, find_get_record remote_tbl ~__context:context ~self:fld_value) :: accu + else accu) + [] other_tbl_refs in + let other_tbl_ref_events = events_of_other_tbl_refs other_tbl_refs in + List.iter (function + | tbl, ref, None -> + error "Failed to generate MOD event on %s %s" tbl ref; +(* Printf.printf "Failed to generate MOD event on %s %s\n%!" tbl ref; *) + | tbl, ref, Some s -> + events_notify ~snapshot:s tbl "mod" ref + ) other_tbl_ref_events + + | Create (tblname, new_objref, kv) -> + let snapshot = find_get_record tblname ~__context:context ~self:new_objref in + let other_tbl_refs = follow_references tblname in + let other_tbl_refs = + List.fold_left (fun accu (tbl,fld) -> + let fld_value = List.assoc fld kv in + if Db_cache_impl.is_valid_ref fld_value + then (tbl, fld_value, find_get_record tbl ~__context:context ~self:fld_value) :: accu + else accu) + [] other_tbl_refs in + let other_tbl_events = events_of_other_tbl_refs other_tbl_refs in + begin match snapshot() with + | None -> + error "Failed to generate ADD event for %s %s" tblname new_objref; + (* Printf.printf "Failed to generate ADD event for %s %s\n%!" tblname new_objref; *) + | Some snapshot -> + events_notify ~snapshot tblname "add" new_objref; + end; + List.iter (function + | tbl, ref, None -> + error "Failed to generate MOD event for %s %s" tbl ref; + (* Printf.printf "Failed to generate MOD event for %s %s\n%!" 
tbl ref;*) + | tbl, ref, Some s -> + events_notify ~snapshot:s tbl "mod" ref + ) other_tbl_events diff --git a/ocaml/database/generation.ml b/ocaml/database/generation.ml index 3502ecee..f99a609f 100644 --- a/ocaml/database/generation.ml +++ b/ocaml/database/generation.ml @@ -23,8 +23,5 @@ let to_string g = Int64.to_string g let add_int a b = Int64.add a (Int64.of_int b) let null_generation = -1L -let filename dbconn = dbconn.Parse_db_conf.path^".generation" +let suffix = ".generation" -let read dbconn = - let gencount_fname = filename dbconn in - try Int64.of_string (Unixext.string_of_file gencount_fname) with _ -> 0L diff --git a/ocaml/database/redo_log.ml b/ocaml/database/redo_log.ml index e4bb34e4..b4d5bd40 100644 --- a/ocaml/database/redo_log.ml +++ b/ocaml/database/redo_log.ml @@ -276,8 +276,9 @@ let rec read_read_response sock fn_db fn_delta expected_gen_count latest_respons R.debug "Found record with generation count %Ld. Expected a record with generation count %Ld so skipping this record." gen_count expected_gen_count; (* Now skip over all the remaining data that the process is trying to send, discarding it all *) read_delta (fun _ _ -> ()) gen_count sock latest_response_time; - read_read_response sock (fun _ _ _ _ -> ()) (fun _ _ -> ()) expected_gen_count latest_response_time datasockpath + read_read_response sock fn_db fn_delta expected_gen_count latest_response_time datasockpath end else begin + R.debug "Found record with generation count %Ld as expected" gen_count; read_delta fn_delta gen_count sock latest_response_time; read_read_response sock fn_db fn_delta (Generation.add_int gen_count 1) latest_response_time datasockpath end @@ -674,3 +675,38 @@ let empty () = if is_enabled() then connect_and_perform_action (action_empty) "invalidate the redo log" +(* Write the given database to the redo-log *) +let flush_db_to_redo_log db = + if is_enabled () then begin + R.debug "Flushing database to redo-log"; + let write_db_to_fd = (fun out_fd -> Db_xml.To.fd out_fd db) in + write_db (Db_cache_types.Manifest.generation (Db_cache_types.Database.manifest db)) write_db_to_fd + end + +let database_callback event db = + let to_write = + if is_enabled () + then match event with + | Db_cache_types.WriteField (tblname, objref, fldname, oldval, newval) -> + R.debug "WriteField(%s, %s, %s, %s, %s)" tblname objref fldname oldval newval; + if Schema.is_field_persistent (Db_cache_types.Database.schema db) tblname fldname + then Some (WriteField(tblname, objref, fldname, newval)) + else None + | Db_cache_types.PreDelete (tblname, objref) -> + None + | Db_cache_types.Delete (tblname, objref, _) -> + if Schema.is_table_persistent (Db_cache_types.Database.schema db) tblname + then Some (DeleteRow(tblname, objref)) + else None + | Db_cache_types.Create (tblname, objref, kvs) -> + if Schema.is_table_persistent (Db_cache_types.Database.schema db) tblname + then Some (CreateRow(tblname, objref, kvs)) + else None + else None in + + Opt.iter (fun entry -> + write_delta (Db_cache_types.Manifest.generation (Db_cache_types.Database.manifest db)) entry + (fun () -> (* the function which will be invoked if a database write is required instead of a delta *) + flush_db_to_redo_log db + ) + ) to_write diff --git a/ocaml/database/redo_log.mli b/ocaml/database/redo_log.mli index ea966f58..63b6f859 100644 --- a/ocaml/database/redo_log.mli +++ b/ocaml/database/redo_log.mli @@ -86,3 +86,9 @@ val apply : (Generation.t -> Unix.file_descr -> int -> float -> unit) -> (Genera val empty : unit -> unit (** Invalidate the block 
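database_callback above turns each update into at most one redo-log delta: writes to non-persistent fields or tables are dropped, and PreDelete never produces a delta because the Delete that follows carries the information. The mapping, sketched with simplified stand-ins for Db_cache_types.update and the redo-log entry type:

    type update =
      | WriteField of string * string * string * string  (* tbl, ref, fld, newval *)
      | PreDelete of string * string
      | Delete of string * string
      | Create of string * string * (string * string) list

    type entry =
      | EWriteField of string * string * string * string
      | EDeleteRow of string * string
      | ECreateRow of string * string * (string * string) list

    let delta_of_update ~field_persistent ~table_persistent = function
      | WriteField (tbl, rf, fld, v) when field_persistent tbl fld ->
          Some (EWriteField (tbl, rf, fld, v))
      | Delete (tbl, rf) when table_persistent tbl ->
          Some (EDeleteRow (tbl, rf))
      | Create (tbl, rf, kvs) when table_persistent tbl ->
          Some (ECreateRow (tbl, rf, kvs))
      | _ -> None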
device. This means that subsequent attempts to read from the block device will not find anything. This function is best-effort only and does not raise any exceptions in the case of error. *) + +val flush_db_to_redo_log: Db_cache_types.Database.t -> unit +(** Immediately write the given database to the redo log *) + +val database_callback: Db_cache_types.update -> Db_cache_types.Database.t -> unit +(** Given a database update, add it to the redo log *) diff --git a/ocaml/database/ref_index.ml b/ocaml/database/ref_index.ml index c10bc944..f0a0f5aa 100644 --- a/ocaml/database/ref_index.ml +++ b/ocaml/database/ref_index.ml @@ -1,5 +1,5 @@ (* - * Copyright (C) 2006-2009 Citrix Systems Inc. + * Copyright (C) 2006-2010 Citrix Systems Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -11,64 +11,28 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(* The ref_index keeps an index of references -> name_label/uuid. - This data is accessible only on the pool master, since the data all lives there. +(* This data is accessible only on the pool master, since the data all lives there. Unlike the "first-class" db calls, these are not marshalled over the wire if called on a slave -- so don't ever call them on slaves! :) *) - (* lookups are not serialised wrt dbcache mutex since they come into this - module directly. Hence need separate mutex here. *) -open Pervasiveext - -let ref_index_mutex = Mutex.create() - -(* Keep track of all references, and which class a reference belongs to: *) -type indexrec = {name_label:string option; uuid: string; _ref:string } -let ref_cache : (string,indexrec) Hashtbl.t = Hashtbl.create 100 -let uuid_cache : (string,indexrec) Hashtbl.t = Hashtbl.create 100 +open Db_cache_types +type indexrec = { + name_label:string option; + uuid: string; + _ref:string +} let string_of (x: indexrec) = - Printf.sprintf "%s%s" x.uuid (default "" (may (fun name -> Printf.sprintf " (%s)" name) x.name_label)) - -let insert indexrec = - Threadext.Mutex.execute ref_index_mutex - (fun ()-> - Hashtbl.replace ref_cache indexrec._ref indexrec; - Hashtbl.replace uuid_cache indexrec.uuid indexrec) + Printf.sprintf "%s%s" x.uuid (Opt.default "" (Opt.map (fun name -> Printf.sprintf " (%s)" name) x.name_label)) -let _internal_lookup key = - if Hashtbl.mem ref_cache key - then Hashtbl.find ref_cache key - else Hashtbl.find uuid_cache key - -let remove key = - Threadext.Mutex.execute ref_index_mutex - (fun ()-> - let x = _internal_lookup key in - Hashtbl.remove ref_cache x._ref; - Hashtbl.remove uuid_cache x.uuid) +let lookup key = + let db = Db_backend.get_database () in + let r (tblname, objref) = + let row = Table.find objref (TableSet.find tblname (Database.tableset db)) in { + name_label = (try Some (Row.find Db_names.name_label row) with _ -> None); + uuid = Row.find Db_names.uuid row; + _ref = Row.find Db_names.ref row; + } in + Opt.map r (Database.lookup_key key db) -let update_name_label key new_label = - Threadext.Mutex.execute ref_index_mutex - (fun ()-> - try - let irec = { (Hashtbl.find ref_cache key) with name_label = Some new_label} in - Hashtbl.replace ref_cache irec._ref irec; - Hashtbl.replace uuid_cache irec.uuid irec - with _ -> ()) -let update_uuid key new_uuid = - Threadext.Mutex.execute ref_index_mutex - (fun () -> - try - let irec = Hashtbl.find ref_cache key in - let old_uuid = irec.uuid in - let newrec = {irec 
with uuid=new_uuid} in - Hashtbl.replace ref_cache newrec._ref newrec; - Hashtbl.remove uuid_cache old_uuid; - Hashtbl.replace uuid_cache newrec.uuid newrec - with _ -> ()) - -let lookup key = - Threadext.Mutex.execute ref_index_mutex - (fun ()->try (Some (_internal_lookup key)) with _ -> None) diff --git a/ocaml/database/ref_index.mli b/ocaml/database/ref_index.mli index 400a8ac2..37dd07b0 100644 --- a/ocaml/database/ref_index.mli +++ b/ocaml/database/ref_index.mli @@ -1,5 +1,5 @@ (* - * Copyright (C) 2006-2009 Citrix Systems Inc. + * Copyright (C) 2006-2010 Citrix Systems Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -11,10 +11,10 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -type indexrec = {name_label:string option; uuid: string; _ref: string} +type indexrec = { + name_label: string option; + uuid: string; + _ref: string +} val string_of : indexrec -> string -val insert : indexrec -> unit -val remove : string (* ref or uuid *) -> unit -val update_name_label : string (* ref *) -> string -> unit -val update_uuid : string (* ref *) -> string -> unit val lookup : string (* ref or uuid *) -> indexrec option diff --git a/ocaml/database/schema.ml b/ocaml/database/schema.ml new file mode 100644 index 00000000..5e8df5dc --- /dev/null +++ b/ocaml/database/schema.ml @@ -0,0 +1,129 @@ + +module Column = struct + type t = { + name: string; + persistent: bool; (** see is_field_persistent *) + empty: string; (** fresh value used when loading non-persistent fields *) + default: string option; (** if the column is missing, this default value is used *) + + issetref: bool; (** only so we can special case set refs in the interface *) + } +end + +module Table = struct + type t = { + name: string; + columns: Column.t list; + persistent: bool; + } + let find name t = List.find (fun col -> col.Column.name = name) t.columns +end + +type relationship = + | OneToMany of string * string * string * string + +module Database = struct + type t = { + tables: Table.t list; + } + let find name t = List.find (fun tbl -> tbl.Table.name = name) t.tables +end + +module StringMap = Map.Make(struct + type t = string + let compare = Pervasives.compare +end) + +type t = { + major_vsn: int; + minor_vsn: int; + database: Database.t; + (** indexed by table name, a list of (this field, foreign table, foreign field) *) + one_to_many: ((string * string * string) list) StringMap.t; +} + +let database x = x.database + +let table tblname x = + try + Database.find tblname (database x) + with Not_found as e -> + Printf.printf "Failed to find table: %s\n%!" 
tblname; + raise e + +let empty = { + major_vsn = 0; + minor_vsn = 0; + database = { Database.tables = [] }; + one_to_many = StringMap.empty; +} + +let is_table_persistent schema tblname = + (table tblname schema).Table.persistent + +let is_field_persistent schema tblname fldname = + let tbl = table tblname schema in + let col = Table.find fldname tbl in + tbl.Table.persistent && col.Column.persistent + +let table_names schema = + List.map (fun t -> t.Table.name) (database schema).Database.tables + +module D=Debug.Debugger(struct let name="xapi" end) +open D +let one_to_many tblname schema = + (* If there is no entry in the map it means that the table has no one-to-many relationships *) + try + StringMap.find tblname schema.one_to_many + with Not_found -> [] + +(* This code could live higher up the stack *) +let of_datamodel () = + let rec flatten_fields fs acc = + match fs with + [] -> acc + | (Datamodel_types.Field f)::fs -> flatten_fields fs (f::acc) + | (Datamodel_types.Namespace (_,internal_fs))::fs -> flatten_fields fs (flatten_fields internal_fs acc) in + let column f = { + Column.name = Escaping.escape_id f.Datamodel_types.full_name; + persistent = f.Datamodel_types.field_persist; + empty = Datamodel_values.gen_empty_db_val f.Datamodel_types.ty; + (* NB Set(Ref _) fields aren't allowed to have a default value specified so we hardcode one here *) + default = begin match f.Datamodel_types.ty with + | Datamodel_types.Set (Datamodel_types.Ref _) -> Some (SExpr.string_of (SExpr.Node [])) + | _ -> Opt.map Datamodel_values.to_db_string f.Datamodel_types.default_value + end ; + issetref = begin match f.Datamodel_types.ty with + | Datamodel_types.Set (Datamodel_types.Ref _) -> true + | _ -> false + end ; + } in + + (* We store the reference in two places for no good reason still: *) + let _ref = { + Column.name = Db_names.ref; + persistent = true; + empty = ""; + default = None; + issetref = false; + } in + + let table obj = { + Table.name = Escaping.escape_obj obj.Datamodel_types.name; + columns = _ref :: (List.map column (flatten_fields obj.Datamodel_types.contents [])); + persistent = obj.Datamodel_types.persist = Datamodel_types.PersistEverything; + } in + let one_to_many t ((one_tbl, one_fld), (many_tbl, many_fld)) = + let key = (one_fld, many_tbl, many_fld) in + let l = if StringMap.mem one_tbl t then StringMap.find one_tbl t else [] in + StringMap.add one_tbl (key :: l) t in + + let database api = { + Database.tables = List.map table (Dm_api.objects_of_api api) + } in + { + major_vsn = Datamodel.schema_major_vsn; + minor_vsn = Datamodel.schema_minor_vsn; + database = database Datamodel.all_api; + one_to_many = List.fold_left one_to_many StringMap.empty (Dm_api.relations_of_api Datamodel.all_api); + } diff --git a/ocaml/db_process/xapi-db-process.ml b/ocaml/db_process/xapi-db-process.ml index ba28679d..e969777e 100644 --- a/ocaml/db_process/xapi-db-process.ml +++ b/ocaml/db_process/xapi-db-process.ml @@ -46,33 +46,32 @@ let parse_operation s = let initialise_db_connections() = let dbs = Parse_db_conf.parse_db_conf (if !config="" then Xapi_globs.db_conf_path else !config) in - Db_conn_store.initialise_db_connections dbs + Db_conn_store.initialise_db_connections dbs; + dbs let read_in_database() = (* Make sure we're running in master mode: we cannot be a slave and then access the dbcache *) Db_cache.set_master true; - initialise_db_connections(); - Db_dirty.make_blank_dirty_records(); + let connections = initialise_db_connections() in (* Initialiase in-memory 
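Schema.one_to_many treats a missing map entry as "this table has no one-to-many relationships", and of_datamodel builds the map by consing onto whatever list is already there. That accumulation pattern in isolation, over a plain StringMap:

    module StringMap = Map.Make(String)

    (* Add a (field, many-table, many-field) triple under the one-table's
       name, creating the entry on first use. *)
    let add_relation one_tbl triple map =
      let existing = try StringMap.find one_tbl map with Not_found -> [] in
      StringMap.add one_tbl (triple :: existing) map

    let one_to_many tblname map =
      try StringMap.find tblname map with Not_found -> []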
database cache *) - Db_cache_impl.initialise_db_cache_nosync() + Db_cache_impl.make connections Schema.empty let write_out_databases() = - List.iter - (fun (_,db)-> Db_connections.force_flush_all db) - (Db_connections.get_dbs_and_gen_counts()) + Db_cache_impl.sync (Db_conn_store.read_db_connections ()) (Db_backend.get_database ()) (* should never be thrown due to checking argument at start *) exception UnknownFormat let write_out_database filename = print_string ("Dumping database to: "^filename^"\n"); - Db_connections.force_flush_all - {Parse_db_conf.dummy_conf with - Parse_db_conf.path=filename; - Parse_db_conf.mode=Parse_db_conf.No_limit; - Parse_db_conf.compress=(!compress) - } + Db_cache_impl.sync + [ { + Parse_db_conf.dummy_conf with + Parse_db_conf.path=filename; + Parse_db_conf.mode=Parse_db_conf.No_limit; + Parse_db_conf.compress=(!compress) + } ] (Db_backend.get_database ()) let help_pad = " " let operation_list = @@ -94,7 +93,7 @@ let do_write_database() = begin read_in_database(); if !xmltostdout then - Db_cache_impl.dump_db_cache (Unix.descr_of_out_channel stdout) + Db_xml.To.fd (Unix.descr_of_out_channel stdout) (Db_backend.get_database ()) else write_out_database !filename end @@ -102,49 +101,47 @@ let do_write_database() = let find_my_host_row() = Xapi_inventory.read_inventory (); let localhost_uuid = Xapi_inventory.lookup Xapi_inventory._installation_uuid in - let host_table = lookup_table_in_cache Db_backend.cache "host" in - let host_rows = get_rowlist host_table in - List.find - (fun row -> let row_uuid = lookup_field_in_row row "uuid" in - localhost_uuid=row_uuid) host_rows + let db = Db_backend.get_database () in + let tbl = TableSet.find Db_names.host (Database.tableset db) in + Table.fold (fun r row acc -> if Row.find Db_names.uuid row = localhost_uuid then (Some (r, row)) else acc) tbl None let _iscsi_iqn = "iscsi_iqn" let _other_config = "other_config" let do_read_hostiqn() = read_in_database(); - let localhost_row = find_my_host_row() in - let other_config_sexpr = lookup_field_in_row localhost_row _other_config in - let other_config = String_unmarshall_helper.map (fun x->x) (fun x->x) other_config_sexpr in - Printf.printf "%s" (List.assoc _iscsi_iqn other_config) + match find_my_host_row() with + | None -> failwith "No row for localhost" + | Some (_, row) -> + let other_config_sexpr = Row.find Db_names.other_config row in + let other_config = String_unmarshall_helper.map (fun x->x) (fun x->x) other_config_sexpr in + Printf.printf "%s" (List.assoc _iscsi_iqn other_config) let do_write_hostiqn() = if !iqn = "" then fatal_error "Must specify '-hostiqn '"; let new_iqn = !iqn in read_in_database(); - let localhost_row = find_my_host_row() in - (* read other_config from my row, replace host_iqn if already there, add it if its not there and write back *) - let other_config_sexpr = lookup_field_in_row localhost_row _other_config in - let other_config = String_unmarshall_helper.map (fun x->x) (fun x->x) other_config_sexpr in - let other_config = - if List.mem_assoc _iscsi_iqn other_config then - (* replace if key already exists *) - List.map (fun (k,v) ->k, if k=_iscsi_iqn then new_iqn else v) other_config - else - (* ... 
otherwise add new key/value pair *) - (_iscsi_iqn,new_iqn)::other_config in - let other_config = String_marshall_helper.map (fun x->x) (fun x->x) other_config in - set_field_in_row localhost_row _other_config other_config; - write_out_databases() + match find_my_host_row() with + | None -> failwith "No row for localhost" + | Some (r, row) -> + (* read other_config from my row, replace host_iqn if already there, add it if its not there and write back *) + let other_config_sexpr = Row.find Db_names.other_config row in + let other_config = String_unmarshall_helper.map (fun x->x) (fun x->x) other_config_sexpr in + let other_config = + if List.mem_assoc _iscsi_iqn other_config then + (* replace if key already exists *) + List.map (fun (k,v) ->k, if k=_iscsi_iqn then new_iqn else v) other_config + else + (* ... otherwise add new key/value pair *) + (_iscsi_iqn,new_iqn)::other_config in + let other_config = String_marshall_helper.map (fun x->x) (fun x->x) other_config in + Db_backend.update_database (set_field_in_row Db_names.host r Db_names.other_config other_config); + write_out_databases() let do_am_i_in_the_database () = - read_in_database(); - try - let (_: Db_cache_types.row) = find_my_host_row() in - Printf.printf "true" - with _ -> - Printf.printf "false" + read_in_database(); + Printf.printf "%b" (find_my_host_row () <> None) let _ = init_logs(); diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 86df34de..4c48a8f9 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -4827,7 +4827,7 @@ let vbd = "unpluggable" "true if this VBD will support hot-unplug"; field ~qualifier:DynamicRO ~ty:Bool "storage_lock" "true if a storage level lock was acquired"; field ~qualifier:StaticRO ~ty:Bool "empty" "if true this represents an empty drive"; - field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:Bool "reserved" "true if the VBD is reserved pending a reboot/migrate"; + field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "reserved" "true if the VBD is reserved pending a reboot/migrate"; field ~ty:(Map(String, String)) "other_config" "additional configuration"; ] @ device_status_fields @ diff --git a/ocaml/idl/ocaml_backend/OMakefile b/ocaml/idl/ocaml_backend/OMakefile index 862363bc..487722bd 100644 --- a/ocaml/idl/ocaml_backend/OMakefile +++ b/ocaml/idl/ocaml_backend/OMakefile @@ -70,6 +70,7 @@ SERVER_OBJS = ../../database/escaping locking_helpers \ $(AUTOGEN_HELPER_DIR)/db_rpc_common_v2 \ $(AUTOGEN_HELPER_DIR)/db_rpc_client_v2 \ $(AUTOGEN_HELPER_DIR)/db_cache_types \ + $(AUTOGEN_HELPER_DIR)/schema \ $(AUTOGEN_HELPER_DIR)/db_filter \ $(AUTOGEN_HELPER_DIR)/db_filter_types \ $(AUTOGEN_HELPER_DIR)/db_filter_parse \ @@ -81,7 +82,6 @@ SERVER_OBJS = ../../database/escaping locking_helpers \ $(AUTOGEN_HELPER_DIR)/db_exn \ $(AUTOGEN_HELPER_DIR)/ref_index \ $(AUTOGEN_HELPER_DIR)/db_backend \ - $(AUTOGEN_HELPER_DIR)/db_dirty \ $(AUTOGEN_HELPER_DIR)/backend_xml \ $(AUTOGEN_HELPER_DIR)/generation \ $(AUTOGEN_HELPER_DIR)/db_connections \ diff --git a/ocaml/idl/ocaml_backend/gen_db_actions.ml b/ocaml/idl/ocaml_backend/gen_db_actions.ml index ad1402c9..506cffb1 100644 --- a/ocaml/idl/ocaml_backend/gen_db_actions.ml +++ b/ocaml/idl/ocaml_backend/gen_db_actions.ml @@ -171,7 +171,7 @@ let read_set_ref obj other full_name = Printf.sprintf "if not(DB.is_valid_ref %s)" Client._self; Printf.sprintf "then raise (Api_errors.Server_error(Api_errors.handle_invalid, [ %s ]))" Client._self; Printf.sprintf "else List.map 
%s.%s (DB.read_set_ref " _string_to_dm (OU.alias_of_ty (DT.Ref other)); - Printf.sprintf " { table = \"%s\"; return=Db_action_helper.reference; " (Escaping.escape_obj obj'); + Printf.sprintf " { table = \"%s\"; return=Db_names.ref; " (Escaping.escape_obj obj'); Printf.sprintf " where_field = \"%s\"; where_value = %s })" fld' Client._self ] diff --git a/ocaml/xapi/parse_db_conf.ml b/ocaml/xapi/parse_db_conf.ml index 763150d8..a93790c9 100644 --- a/ocaml/xapi/parse_db_conf.ml +++ b/ocaml/xapi/parse_db_conf.ml @@ -26,7 +26,8 @@ type db_connection = write_limit_period:int; write_limit_write_cycles:int; is_on_remote_storage:bool; - other_parameters:(string*string) list + other_parameters:(string*string) list; + mutable last_generation_count: Generation.t; } let default_write_limit_period = 21600 (* 6 hours *) @@ -39,13 +40,18 @@ let dummy_conf = write_limit_write_cycles=default_write_cycles; compress=false; is_on_remote_storage=false; - other_parameters=[] + other_parameters=[]; + last_generation_count = Generation.null_generation; } -(* the db conf that we use for temporary backup/restore files *) -let backup_file_dbconn = {dummy_conf with - path=Xapi_globs.db_temporary_restore_path - } +let make path = { dummy_conf with path = path } + +let generation_filename dbconn = dbconn.path ^ Generation.suffix + +let generation_read dbconn = + let gencount_fname = generation_filename dbconn in + try Generation.of_string (Unixext.string_of_file gencount_fname) with _ -> 0L + (* The db conf used for bootstrap purposes, e.g. mounting the 'real' db on shared storage *) let db_snapshot_dbconn = {dummy_conf with @@ -122,7 +128,8 @@ let parse_db_conf s = is_on_remote_storage = maybe_put_in "is_on_remote_storage" false bool_of_string; write_limit_period=maybe_put_in "write_limit_period" default_write_limit_period int_of_string; write_limit_write_cycles=maybe_put_in "write_limit_write_cycles" default_write_cycles int_of_string; - other_parameters = !key_values (* the things remaining in key_values at this point are the ones we haven't parsed out explicitly above.. *) + other_parameters = !key_values; (* the things remaining in key_values at this point are the ones we haven't parsed out explicitly above.. 
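generation_read above fixes the on-disk convention: the generation counter lives beside the database file under a ".generation" suffix, and a missing or unreadable counter file counts as generation 0. Roughly equivalent stdlib-only code, with plain channel I/O in place of Unixext.string_of_file:

    let generation_read path =
      let fname = path ^ ".generation" in
      try
        let ic = open_in fname in
        let line = input_line ic in
        close_in ic;
        Int64.of_string (String.trim line)
      with _ -> 0L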
*) + last_generation_count = Generation.null_generation; + } in let connections : db_connection list ref = ref [] in while !lines<>[] do diff --git a/ocaml/xapi/pool_db_backup.ml b/ocaml/xapi/pool_db_backup.ml index a3df4a6a..05870646 100644 --- a/ocaml/xapi/pool_db_backup.ml +++ b/ocaml/xapi/pool_db_backup.ml @@ -40,124 +40,127 @@ let write_database (s: Unix.file_descr) ~__context = let len = String.length minimally_compliant_miami_database in ignore (Unix.write s minimally_compliant_miami_database 0 len) else - Db_cache_impl.dump_db_cache s + Db_xml.To.fd s (Db_backend.get_database ()) (** Make sure the backup database version is compatible *) -let version_check manifest = - if manifest.Db_cache_types.schema_major_vsn <> Datamodel.schema_major_vsn || - manifest.Db_cache_types.schema_minor_vsn <> Datamodel.schema_minor_vsn then - begin - error "Pool backup file was created with incompatable product version"; - raise (Api_errors.Server_error(Api_errors.restore_incompatible_version, [])) - end +let version_check db = + let major, minor = Manifest.schema (Database.manifest db) in + if major <> Datamodel.schema_major_vsn || minor <> Datamodel.schema_minor_vsn then begin + error "Pool backup file was created with incompatible product version"; + raise (Api_errors.Server_error(Api_errors.restore_incompatible_version, [])) + end (** Restore all of our state from an XML backup. This includes the pool config, token etc *) let restore_from_xml __context dry_run (xml_filename: string) = - debug "attempting to restore database from %s" xml_filename; - let manifest, unmarshalled_db = Db_xml.From.file xml_filename in + debug "attempting to restore database from %s" xml_filename; + let db = Db_xml.From.file (Schema.of_datamodel ()) xml_filename in - (* Do not write the pool_conf: it contains only the master/slave configuration which is - managed separately *) - version_check manifest; - (* To prevent duplicate installation_uuids or duplicate IP address confusing the - "no other masters" check we remove all hosts from the backup except the master. *) - let hosts = lookup_table_in_cache unmarshalled_db "host" in - let uuid_to_ref = fold_over_rows - (fun _ref r acc -> (lookup_field_in_row r "uuid", _ref)::acc) hosts [] in + (* Do not write the pool_conf: it contains only the master/slave configuration which is + managed separately *) + version_check db; + (* To prevent duplicate installation_uuids or duplicate IP address confusing the + "no other masters" check we remove all hosts from the backup except the master. *) + let ts = Database.tableset db in + let hosts = TableSet.find Db_names.host ts in + let uuid_to_ref = Table.fold + (fun _ref r acc -> (Row.find "uuid" r, _ref)::acc) hosts [] in - (* Look up the pool master: *) - let pools = lookup_table_in_cache unmarshalled_db Datamodel._pool in - let master = fold_over_rows (fun _ref r acc -> lookup_field_in_row r "master") pools "" in + (* Look up the pool master: *) + let pools = TableSet.find Db_names.pool ts in + let master = Table.fold + (fun _ref r acc -> Row.find Db_names.master r) pools "" in - (* Remove all slaves from the database *) - let hosts' = create_empty_table () in - iter_over_rows (fun _ref r -> if _ref = master then set_row_in_table hosts' master r) hosts; - set_table_in_cache unmarshalled_db "host" hosts'; - debug "All hosts: [ %s ]" (String.concat "; " (List.map fst uuid_to_ref)); - debug "Previous master: %s" master; - - (* Rewrite this host's PIFs' MAC addresses based on device name. 
*) + (* Remove all slaves from the database *) + let master_row = Table.find master hosts in + let hosts' = Table.add master master_row Table.empty in + + let db = set_table Db_names.host hosts' db in + + debug "All hosts: [ %s ]" (String.concat "; " (List.map fst uuid_to_ref)); + debug "Previous master: %s" master; - (* First inspect the current machine's configuration and build up a table of - device name -> PIF reference. *) - let localhost = Helpers.get_localhost ~__context in - let all_pifs = Db.Host.get_PIFs ~__context ~self:localhost in + (* Rewrite this host's PIFs' MAC addresses based on device name. *) + + (* First inspect the current machine's configuration and build up a table of + device name -> PIF reference. *) + let localhost = Helpers.get_localhost ~__context in + let all_pifs = Db.Host.get_PIFs ~__context ~self:localhost in - let device_to_ref = - let physical = List.filter (fun self -> Db.PIF.get_physical ~__context ~self) all_pifs in - List.map (fun self -> Db.PIF.get_device ~__context ~self, self) physical in + let device_to_ref = + let physical = List.filter (fun self -> Db.PIF.get_physical ~__context ~self) all_pifs in + List.map (fun self -> Db.PIF.get_device ~__context ~self, self) physical in - (* Since it's difficult for us to change the /etc/xensource-inventory and the ifcfg- - files, we /preserve/ the current management PIF across the restore. NB this interface - might be a bond or a vlan. *) - let mgmt_dev = - match List.filter (fun self -> Db.PIF.get_management ~__context ~self) all_pifs with - | [ dev ] -> Some (Db.PIF.get_device ~__context ~self:dev) - | _ -> None (* no management interface configured *) in + (* Since it's difficult for us to change the /etc/xensource-inventory and the ifcfg- + files, we /preserve/ the current management PIF across the restore. NB this interface + might be a bond or a vlan. *) + let mgmt_dev = + match List.filter (fun self -> Db.PIF.get_management ~__context ~self) all_pifs with + | [ dev ] -> Some (Db.PIF.get_device ~__context ~self:dev) + | _ -> None (* no management interface configured *) in - (* The PIFs of the master host in the backup need their MAC addresses adjusting - to match the current machine. For safety the new machine needs to have at least - the same number and same device names as the backup being restored. (Note that - any excess interfaces will be forgotten and need to be manually reintroduced) - - Additionally we require the currently configured management interface device name - is found in the backup so we can re-use the existing ifcfg- files in /etc/. - We need this because the interface-reconfigure --force-up relies on the existing - config files. Ideally a master startup (such as that in the restore db code) would - actively regenerate the config files but this is too invasive a change for CA-15164. - - PIFs whose device name are not recognised or those belonging to (now dead) - slaves are forgotten. *) - let pifs = lookup_table_in_cache unmarshalled_db "PIF" in - let pifs' = create_empty_table () in - let found_mgmt_if = ref false in - let ifs_in_backup = ref [] in - iter_over_rows - (fun _ref r -> - if lookup_field_in_row r "host" = master then begin - let device = lookup_field_in_row r "device" in - ifs_in_backup := device :: !ifs_in_backup; - - let uuid = lookup_field_in_row r "uuid" in - let physical = bool_of_string (lookup_field_in_row r "physical") in + (* The PIFs of the master host in the backup need their MAC addresses adjusting + to match the current machine. 
For safety the new machine needs to have at least + the same number and same device names as the backup being restored. (Note that + any excess interfaces will be forgotten and need to be manually reintroduced) + + Additionally we require the currently configured management interface device name + is found in the backup so we can re-use the existing ifcfg- files in /etc/. + We need this because the interface-reconfigure --force-up relies on the existing + config files. Ideally a master startup (such as that in the restore db code) would + actively regenerate the config files but this is too invasive a change for CA-15164. + + PIFs whose device name are not recognised or those belonging to (now dead) + slaves are forgotten. *) + let pifs = TableSet.find "PIF" ts in + + let pifs' = ref Table.empty in + let found_mgmt_if = ref false in + let ifs_in_backup = ref [] in + Table.iter + (fun _ref r -> + if Row.find "host" r = master then begin + let device = Row.find "device" r in + ifs_in_backup := device :: !ifs_in_backup; - let pif = create_empty_row () in - iter_over_fields (fun k v -> set_field_in_row pif k v) r; + let uuid = Row.find "uuid" r in + let physical = bool_of_string (Row.find "physical" r) in - let is_mgmt = Some device = mgmt_dev in - set_field_in_row pif "management" (string_of_bool is_mgmt); - if is_mgmt then found_mgmt_if := true; + let is_mgmt = Some device = mgmt_dev in + let pif = Row.add "management" (string_of_bool is_mgmt) r in + if is_mgmt then found_mgmt_if := true; - (* We only need to rewrite the MAC addresses of physical PIFs *) - if physical then begin - (* If this is a physical PIF but we can't find the device name - on the restore target, bail out. *) - if not(List.mem_assoc device device_to_ref) - then raise (Api_errors.Server_error(Api_errors.restore_target_missing_device, [ device ])); - (* Otherwise rewrite the MAC address to match the current machine - and set the management flag accordingly *) - let existing_pif = List.assoc device device_to_ref in - set_field_in_row pif "MAC" (Db.PIF.get_MAC ~__context ~self:existing_pif); - end; + (* We only need to rewrite the MAC addresses of physical PIFs *) + let pif = + if not physical then pif + else begin + (* If this is a physical PIF but we can't find the device name + on the restore target, bail out. 
*)
+					if not(List.mem_assoc device device_to_ref)
+					then raise (Api_errors.Server_error(Api_errors.restore_target_missing_device, [ device ]));
+					(* Otherwise rewrite the MAC address to match the current machine
+					   and set the management flag accordingly *)
+					let existing_pif = List.assoc device device_to_ref in
+					Row.add "MAC" (Db.PIF.get_MAC ~__context ~self:existing_pif) pif
+				end in
 
-			debug "Rewriting PIF uuid %s device %s (management %s -> %s) MAC %s -> %s"
-				uuid device (lookup_field_in_row r "management") (lookup_field_in_row pif "management")
-				(lookup_field_in_row r "MAC") (lookup_field_in_row pif "MAC");
-			set_row_in_table pifs' _ref pif
-		end else begin
-			(* don't bother copying forgotten slave PIFs *)
-			debug "Forgetting slave PIF uuid %s" (lookup_field_in_row r "uuid")
-		end
-	) pifs;
-	set_table_in_cache unmarshalled_db "PIF" pifs';
-	(* Check that management interface was synced up *)
-	if not(!found_mgmt_if) && mgmt_dev <> None
-	then raise (Api_errors.Server_error(Api_errors.restore_target_mgmt_if_not_in_backup, !ifs_in_backup));
-
-	(* write manifest and unmarshalled db directly to db_temporary_restore_path, so its ready for us on restart *)
-	if not(dry_run)
-	then Db_xml.To.file Xapi_globs.db_temporary_restore_path (manifest, unmarshalled_db)
+				debug "Rewriting PIF uuid %s device %s (management %s -> %s) MAC %s -> %s"
+					uuid device (Row.find "management" r) (Row.find "management" pif)
+					(Row.find "MAC" r) (Row.find "MAC" pif);
+				pifs' := Table.add _ref pif !pifs'
+			end else begin
+				(* don't bother copying forgotten slave PIFs *)
+				debug "Forgetting slave PIF uuid %s" (Row.find "uuid" r)
+			end
+		) pifs;
+	let db = set_table "PIF" !pifs' db in
+	(* Check that management interface was synced up *)
+	if not(!found_mgmt_if) && mgmt_dev <> None
+	then raise (Api_errors.Server_error(Api_errors.restore_target_mgmt_if_not_in_backup, !ifs_in_backup));
+
+	(* write the unmarshalled db directly to db_temporary_restore_path, so it's ready for us on restart *)
+	if not(dry_run)
+	then Db_xml.To.file Xapi_globs.db_temporary_restore_path db
 
 (** Called when a CLI user downloads a backup of the database *)
 let pull_database_backup_handler (req: Http.request) s =
@@ -217,9 +220,9 @@ let http_fetch_db ~master_address ~pool_secret =
 		(* no content length since it's streaming *)
 		let _, _ = Xmlrpcclient.http_rpc_fd fd headers "" in
 		let inchan = Unix.in_channel_of_descr fd in (* never read from fd again! 
*) - let manifest, unmarshalled_db = Db_xml.From.channel inchan in - version_check manifest; - (manifest,unmarshalled_db) + let db = Db_xml.From.channel (Schema.of_datamodel ()) inchan in + version_check db; + db ) (fun () -> Stunnel.disconnect st_proc) @@ -236,13 +239,13 @@ let fetch_database_backup ~master_address ~pool_secret ~force = (* if there's nothing to do then we don't even bother requesting backup *) if connections<>[] then begin - let (manifest,unmarshalled_db) = http_fetch_db ~master_address ~pool_secret in + let db = http_fetch_db ~master_address ~pool_secret in (* flush backup to each of our database connections *) List.iter (fun dbconn -> Threadext.Mutex.execute slave_backup_m (fun () -> - Db_connections.force_flush_specified_cache dbconn unmarshalled_db; + Db_connections.flush dbconn db; Slave_backup.notify_write dbconn (* update writes_this_period for this connection *) ) ) @@ -260,7 +263,7 @@ let pool_db_backup_thread () = begin let hosts = Db.Host.get_all ~__context in let hosts = List.filter (fun hostref -> hostref <> !Xapi_globs.localhost_ref) hosts in - let generation = Db_lock.with_lock (fun () -> Db_cache_types.generation_of_cache Db_backend.cache) in + let generation = Db_lock.with_lock (fun () -> Manifest.generation (Database.manifest (Db_backend.get_database ()))) in let dohost host = try Thread.delay pool_db_sync_timer; diff --git a/ocaml/xapi/redo_log_usage.ml b/ocaml/xapi/redo_log_usage.ml index 5f7013a0..6ce48997 100644 --- a/ocaml/xapi/redo_log_usage.ml +++ b/ocaml/xapi/redo_log_usage.ml @@ -13,7 +13,7 @@ *) open Pervasiveext (* for ignore_exn *) -module R = Debug.Debugger(struct let name = "xapi" end) +module R = Debug.Debugger(struct let name = "redo_log" end) exception NoGeneration exception DeltaTooOld @@ -43,11 +43,11 @@ let read_from_redo_log staging_path = if total_read <> expected_length then raise (DatabaseWrongSize (expected_length, total_read)); (* Read from the file into the cache *) - let fake_conn_db_file = { Parse_db_conf.dummy_conf with - Parse_db_conf.path = temp_file - } in + let conn = Parse_db_conf.make temp_file in (* ideally, the reading from the file would also respect the latest_response_time *) - ignore (Backend_xml.populate_and_read_manifest fake_conn_db_file); + let db = Backend_xml.populate (Schema.of_datamodel ()) conn in + Db_backend.update_database (fun _ -> db); + R.debug "Finished reading database from %s into cache (generation = %Ld)" temp_file gen_count; (* Set the generation count *) @@ -60,19 +60,19 @@ let read_from_redo_log staging_path = in let read_delta gen_count delta = - (* Apply the delta *) - Db_cache.apply_delta_to_cache delta; - (* Update the generation count *) - match !latest_generation with - | None -> raise NoGeneration (* we should have already read in a database with a generation count *) - | Some g -> - if gen_count > g - then latest_generation := Some gen_count - else raise DeltaTooOld (* the delta should be at least as new as the database to which it applies *) + (* Apply the delta *) + Db_cache.apply_delta_to_cache delta; + (* Update the generation count *) + match !latest_generation with + | None -> raise NoGeneration (* we should have already read in a database with a generation count *) + | Some g -> + if gen_count > g + then latest_generation := Some gen_count + else raise DeltaTooOld (* the delta should be at least as new as the database to which it applies *) in R.debug "Reading from redo log"; - Redo_log.apply read_db read_delta; + Redo_log.apply read_db read_delta; (* 3. 
Write the database and generation to a file * Note: if there were no deltas applied then this is semantically @@ -93,10 +93,10 @@ let read_from_redo_log staging_path = R.debug "Database from redo log has generation %Ld" generation; (* Write the in-memory cache to the file *) (* Make sure the generation count is right -- is this necessary? *) - Db_cache_types.set_generation Db_backend.cache generation; - let manifest = Db_cache_types.manifest_of_cache Db_backend.cache in - Db_xml.To.file staging_path (manifest, Db_backend.cache); - Unixext.write_string_to_file (staging_path ^ ".generation") (Generation.to_string generation) + Db_backend.update_database (Db_cache_types.Database.set_generation generation); + let db = Db_backend.get_database () in + Db_xml.To.file staging_path db; + Unixext.write_string_to_file (staging_path ^ ".generation") (Generation.to_string generation) end with _ -> () (* it's just a best effort. if we can't read from the log, then don't worry. *) diff --git a/ocaml/xapi/xapi.ml b/ocaml/xapi/xapi.ml index e9ce71d2..5974ce9e 100644 --- a/ocaml/xapi/xapi.ml +++ b/ocaml/xapi/xapi.ml @@ -65,20 +65,22 @@ let database_ready_for_clients_c = Condition.create () let database_ready_for_clients_m = Mutex.create () let database_ready_for_clients = ref false (* while this is false, client calls will be blocked *) +open Db_cache_types + (** Starts the main database engine: this should be done only on the master node. The db connections must have been parsed from db.conf file and initialised before this fn is called. Also this function depends on being able to call API functions through the external interface. *) let start_database_engine () = - Db_dirty.make_blank_dirty_records(); + let schema = Schema.of_datamodel () in + + let connections = Db_conn_store.read_db_connections () in + Db_cache_impl.make connections schema; + Db_cache_impl.sync connections (Db_backend.get_database ()); - (* Check if db files exist, if not make them *) - let major, minor = Datamodel.schema_major_vsn, Datamodel.schema_minor_vsn in - List.iter (Db_connections.maybe_create_new_db (major,minor)) (Db_conn_store.read_db_connections()); + Db_backend.update_database (Database.register_callback "redo_log" Redo_log.database_callback); + Db_backend.update_database (Database.register_callback "events" Eventgen.database_callback); - (* Initialise in-memory database cache *) - debug "Populating db cache"; - Db_cache_impl.initialise (); debug "Performing initial DB GC"; Db_gc.single_pass (); diff --git a/ocaml/xapi/xapi_ha.ml b/ocaml/xapi/xapi_ha.ml index 02705c4a..50179004 100644 --- a/ocaml/xapi/xapi_ha.ml +++ b/ocaml/xapi/xapi_ha.ml @@ -1369,6 +1369,8 @@ let disable __context = then raise (Api_errors.Server_error(Api_errors.ha_not_enabled, [])); disable_internal __context +open Db_cache_types (* for the Manifest. Database. functions below *) + let enable __context heartbeat_srs configuration = debug "Enabling HA on the Pool."; let pool = Helpers.get_pool ~__context in @@ -1524,7 +1526,7 @@ let enable __context heartbeat_srs configuration = (* ... 
*) (* Make sure everyone's got a fresh database *) - let generation = Db_lock.with_lock (fun () -> Db_cache_types.generation_of_cache Db_backend.cache) in + let generation = Db_lock.with_lock (fun () -> Manifest.generation (Database.manifest (Db_backend.get_database ()))) in let errors = thread_iter_all_exns (fun host -> debug "Synchronising database with host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); diff --git a/ocaml/xapi/xapi_host_backup.ml b/ocaml/xapi/xapi_host_backup.ml index b702f1a1..77abc117 100644 --- a/ocaml/xapi/xapi_host_backup.ml +++ b/ocaml/xapi/xapi_host_backup.ml @@ -62,22 +62,8 @@ let host_backup_handler (req: request) s = req.close <- true; Xapi_http.with_context "Downloading host backup" req s (fun __context -> - Http_svr.headers s (Http.http_200_ok ()); - - if on_oem __context && Pool_role.is_master () - then - begin - List.iter (fun (_,db)-> Db_connections.force_flush_all db) (Db_connections.get_dbs_and_gen_counts()); - Threadext.Mutex.execute - Db_lock.global_flush_mutex - (fun () -> - host_backup_handler_core ~__context s - ) - end - else - begin + Http_svr.headers s (Http.http_200_ok ()); host_backup_handler_core ~__context s - end ) (** Helper function to prevent double-closes of file descriptors diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml index 7d9e4096..48ab9ea3 100644 --- a/ocaml/xapi/xapi_pool.ml +++ b/ocaml/xapi/xapi_pool.ml @@ -760,6 +760,8 @@ let eject ~__context ~host = (* delete backup databases and any temporary restore databases *) Unixext.unlink_safe Xapi_globs.backup_db_xml; Unixext.unlink_safe Xapi_globs.db_temporary_restore_path; + Unixext.unlink_safe Xapi_globs.ha_metadata_db; + Unixext.unlink_safe Xapi_globs.gen_metadata_db; (* If we've got local storage, remove it *) if (Helpers.local_storage_exists ()) then begin @@ -780,7 +782,7 @@ let eject ~__context ~host = (* We need to delete all local dbs but leave remote ones alone *) let local = List.filter (fun db -> not db.Parse_db_conf.is_on_remote_storage) dbs in List.iter Unixext.unlink_safe (List.map (fun db->db.Parse_db_conf.path) local); - List.iter Unixext.unlink_safe (List.map Generation.filename local); + List.iter Unixext.unlink_safe (List.map Parse_db_conf.generation_filename local); (* remove any shared databases from my db.conf *) (* XXX: on OEM edition the db.conf is rebuilt on every boot *) Parse_db_conf.write_db_conf local; @@ -801,6 +803,8 @@ let eject ~__context ~host = (* Prohibit parallel flushes since they're so expensive *) let sync_m = Mutex.create () +open Db_cache_types + let sync_database ~__context = Mutex.execute sync_m (fun () -> @@ -811,7 +815,7 @@ let sync_database ~__context = then debug "flushed database to metadata VDI: assuming this is sufficient." 
else begin debug "flushing database to all online nodes"; - let generation = Db_lock.with_lock (fun () -> Db_cache_types.generation_of_cache Db_backend.cache) in + let generation = Db_lock.with_lock (fun () -> Manifest.generation (Database.manifest (Db_backend.get_database ()))) in Threadext.thread_iter (fun host -> Helpers.call_api_functions ~__context diff --git a/ocaml/xapi/xha_metadata_vdi.ml b/ocaml/xapi/xha_metadata_vdi.ml index 02d57622..8849b28a 100644 --- a/ocaml/xapi/xha_metadata_vdi.ml +++ b/ocaml/xapi/xha_metadata_vdi.ml @@ -68,6 +68,6 @@ open Pervasiveext (** Attempt to flush the database to the metadata VDI *) let flush_database ~__context = try - Backend_xml.flush_db_to_redo_log Db_backend.cache; + Redo_log.flush_db_to_redo_log (Db_backend.get_database ()); true with _ -> false
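
A note on the Db_cache_types API that restore_from_xml and the other rewritten functions above rely on: Row, Table and TableSet values are immutable, so a "write" such as Row.add or Table.add returns a new value instead of mutating in place. The stand-in modules below are plain string maps, not xapi's actual definitions, but they sketch the pattern:

    (* Minimal model of the immutable Row/Table API used above; the real
       Db_cache_types carries more (schema, generations) but the update
       pattern is the same. *)
    module StringMap = Map.Make(String)

    module Row = struct
      type t = string StringMap.t
      let empty = StringMap.empty
      let find key row = StringMap.find key row
      let add key value row = StringMap.add key value row
    end

    module Table = struct
      type t = Row.t StringMap.t
      let empty = StringMap.empty
      let find rf table = StringMap.find rf table
      let add rf row table = StringMap.add rf row table
      let iter f table = StringMap.iter f table
      let fold f table acc = StringMap.fold f table acc
    end

    (* "Updating" a field builds a new row and a new table; the old table
       is untouched, so a snapshot taken before the update stays valid.
       Raises Not_found if [rf] is not present. *)
    let set_field rf key value table =
      Table.add rf (Row.add key value (Table.find rf table)) table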
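Db_backend itself is not shown in this diff, but from the get_database/update_database calls above it reduces to roughly the single-global-reference shape below. This is a sketch, not xapi's implementation: the mutex stands in for the real db lock. The point is that the only side effect left in the layer is swapping the reference:

    (* One global reference to an immutable database value; updates are
       pure functions applied atomically. *)
    module Backend (D : sig type t val initial : t end) = struct
      let db = ref D.initial
      let m = Mutex.create ()

      let get_database () = !db

      (* [f] is a pure D.t -> D.t function, e.g. Database.register_callback
         or Database.set_generation as used above. *)
      let update_database f =
        Mutex.lock m;
        (try db := f !db
         with e -> Mutex.unlock m; raise e);
        Mutex.unlock m
    end

    (* Toy usage: *)
    module B = Backend (struct type t = int let initial = 0 end)
    let () = B.update_database succ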
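Because Database.register_callback is passed directly to Db_backend.update_database in start_database_engine, the callback table evidently lives inside the database value itself, so registration is just another pure update. A rough model follows; the real callback argument type, which must also describe each update for the redo-log and event generators, is not visible in this diff:

    (* Callbacks stored in the (immutable) database value: registering one
       is itself a pure t -> t update, which is why it composes with
       update_database in start_database_engine. *)
    module Db_model = struct
      type t = {
        generation : int64;
        callbacks : (string * (t -> unit)) list;
      }

      let register_callback name f db =
        { db with callbacks = (name, f) :: db.callbacks }

      (* Called after each committed update, e.g. to feed the redo-log
         and the event system: *)
      let notify db = List.iter (fun (_name, f) -> f db) db.callbacks
    end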
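Finally, the generation_filename/generation_read helpers in parse_db_conf.ml, together with the staging_path ^ ".generation" write in redo_log_usage.ml, imply a simple on-disk convention: each database file has a sibling <path>.generation holding a decimal Int64, and a missing or unreadable counter is treated as zero. A standalone equivalent (helper names here are illustrative, standing in for Generation.*):

    (* Read a generation counter stored next to a database file.
       Missing or unreadable counters default to 0L, matching
       generation_read above. *)
    let suffix = ".generation"

    let read_generation db_path =
      try
        let ic = open_in (db_path ^ suffix) in
        let n =
          try Int64.of_string (String.trim (input_line ic))
          with e -> close_in ic; raise e
        in
        close_in ic;
        n
      with _ -> 0L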