- my @urls;
- my %unique_urls;
-
- foreach (split(/ *</)) {
- if ( m/^meta /i ) {
-
- # Likewise, reject any web pages that have a KEYWORDS meta tag
- # that is too long.
- #
- if (m/name ?= ?\"?keywords\"?/i &&
- m/content ?= ?\"([^\"]+)\"/) {
- my $L = length($1);
- if ($L > 1000) {
- if ($verbose > 1) {
- print STDERR "$progname: keywords of" .
- " length $L in $url: rejecting.\n";
- }
- $rejected_urls{$url} = $L;
- $body = undef;
- $_ = undef;
- return ();
- } elsif ( $verbose > 2 ) {
- print STDERR "$progname: keywords of length $L" .
- " in $url (ok.)\n";
- }
- }
-
- } elsif ( m/^(img|a) .*(src|href) ?= ?\"? ?(.*?)[ >\"]/io ) {
-
- my $was_inline = ( "$1" eq "a" || "$1" eq "A" );
- my $link = $3;
- my ( $width ) = m/width ?=[ \"]*(\d+)/oi;
- my ( $height ) = m/height ?=[ \"]*(\d+)/oi;
- $_ = $link;
-
- if ( m@^/@o ) {
- my $site;
- ( $site = $base ) =~ s@^(http://[^/]*).*@$1@gio;
- $_ = "$site$link";
- } elsif ( ! m@^[^/:?]+:@ ) {
- $_ = "$base$link";
- s@/\./@/@g;
- while (s@/\.\./@/@g) {
- }
- }
-
- # skip non-http
- if ( ! m@^http://@io ) {
- next;
- }
-
- # skip non-image
- if ( ! m@[.](gif|jpg|jpeg|pjpg|pjpeg)$@io ) {
- next;
- }
-
- # skip really short or really narrow images
- if ( $width && $width < $min_width) {
- if ( $verbose > 2 ) {
- if (!$height) { $height = "?"; }
- print STDERR "$progname: skip narrow image " .
- "$_ (${width}x$height)\n";
- }
- next;
- }
-
- if ( $height && $height < $min_height) {
- if ( $verbose > 2 ) {
- if (!$width) { $width = "?"; }
- print STDERR "$progname: skip short image " .
- "$_ (${width}x$height)\n";
- }
- next;
- }
-
- # skip images with ratios that make them look like banners.
- if ( $min_ratio && $width && $height &&
- ($width * $min_ratio ) > $height ) {
- if ( $verbose > 2 ) {
- if (!$height) { $height = "?"; }
- print STDERR "$progname: skip bad ratio " .
- "$_ (${width}x$height)\n";
- }
- next;
- }
-
- my $url = $_;
-
- if ( $unique_urls{$url} ) {
- if ( $verbose > 2 ) {
- print STDERR "$progname: skip duplicate image $_\n";
- }
- next;
- }
-
- if ( $verbose > 2 ) {
- print STDERR "$progname: got $url" .
- ($width && $height ? " (${width}x${height})" : "") .
- ($was_inline ? " (inline)" : "") . "\n";
- }
-
- $urls[++$#urls] = $url;
- $unique_urls{$url}++;
-
- # jpegs are preferable to gifs.
- $_ = $url;
- if ( ! m@[.]gif$@io ) {
- $urls[++$#urls] = $url;
- }
-
- # pointers to images are preferable to inlined images.
- if ( ! $was_inline ) {
- $urls[++$#urls] = $url;
- $urls[++$#urls] = $url;
- }
- }
+ $last_search = $lycos_search_url; # for warnings
+
+ # lycos seems to always give us back dictionaries and word lists if
+ # we search for more than one word...
+ #
+ my $words = random_word();
+
+ my $start = int(rand(8)) * 10 + 1;
+ my $search_url = $lycos_search_url . $words . "&first=$start&page=more";
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, $words);
+
+ my @candidates = ();
+ foreach my $u (@subpages) {
+
+ # Lycos plays redirection games.
+ # (not any more?)
+# next unless ($u =~ m@^http://click.lycos.com/director.asp
+# .*
+# \btarget=([^&]+)
+# .*
+# @x);
+# $u = url_decode($1);
+
+ next unless ($u =~ m@^http://@i); # skip non-HTTP or relative URLs
+ next if ($u =~ m@[/.]hotbot\.com\b@i); # skip lycos builtins
+ next if ($u =~ m@[/.]lycos\.com\b@i); # skip lycos builtins
+ next if ($u =~ m@[/.]terralycos\.com\b@i); # skip lycos builtins
+ next if ($u =~ m@[/.]inktomi\.com\b@i); # skip lycos builtins
+
+
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ }
+
+ return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
+ $timeout, @candidates);
+}
+
+
+\f
+############################################################################
+#
+# Pick images by feeding random words into news.yahoo.com
+#
+############################################################################
+
+my $yahoo_news_url = "http://news.search.yahoo.com/search/news" .
+ "?c=news_photos" .
+ "&p=";
+
+# yahoonews -- feed one random word into Yahoo News' photo search and
+# hand the resulting hit pages to pick_image_from_pages.
+#
+# Args:    $timeout -- seconds allowed for the HTTP transactions.
+# Returns: whatever pick_image_from_pages returns (that sub is outside
+#          this chunk; presumably a page/image pair, or () on failure --
+#          confirm against its definition.)
+#
+sub pick_from_yahoo_news_text {
+ my ( $timeout ) = @_;
+
+ $last_search = $yahoo_news_url; # for warnings
+
+ # A single random word is the whole search query.
+ my $words = random_word();
+ my $search_url = $yahoo_news_url . $words;
+
+ my ($search_hit_count, @subpages) =
+ pick_from_search_engine ($timeout, $search_url, $words);
+
+ my @candidates = ();
+ foreach my $u (@subpages) {
+
+ # de-redirectize the URLs: rewrite Yahoo's rds.yahoo.com
+ # redirector links back into direct "http:..." URLs.
+ $u =~ s@^http://rds\.yahoo\.com/.*-http%3A@http:@s;
+
+ # only accept URLs on Yahoo's news site
+ next unless ($u =~ m@^http://dailynews\.yahoo\.com/@i ||
+ $u =~ m@^http://story\.news\.yahoo\.com/@i);
+ next unless ($u =~ m@&u=/@);
+
+ LOG ($verbose_filter, " candidate: $u");
+ push @candidates, $u;
+ }
+
+ return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1,
+ $timeout, @candidates);
+}
+
+
+\f
+############################################################################
+#
+# Pick images from LiveJournal's list of recently-posted images.
+#
+############################################################################
+
+my $livejournal_img_url = "http://www.livejournal.com/stats/latest-img.bml";
+
+# With most of our image sources, we get a random page and then select
+# from the images on it. However, in the case of LiveJournal, the page
+# of images tends to update slowly; so we'll remember the last N entries
+# on it and randomly select from those, to get a wider variety each time.
+
+my $lj_cache_size = 1000;
+my @lj_cache = (); # fifo, for ordering by age
+my %lj_cache = (); # hash, for detecting dups
+
+# livejournal -- scrape LiveJournal's latest-images page, merge the new
+# entries into the cache above, and return one cached [image, page] pair
+# chosen at random (removing it from the cache so it isn't repeated.)
+#
+# Args:    $timeout -- seconds allowed for the HTTP transaction.
+# Returns: ($page_url, $img_url), or () if the page could not be loaded
+#          and the cache is empty.
+#
+sub pick_from_livejournal_images {
+ my ( $timeout ) = @_;
+
+ $last_search = $livejournal_img_url; # for warnings
+
+ my ( $base, $body ) = get_document ($livejournal_img_url, undef, $timeout);
+ return () unless $body;
+
+ # Flatten the document, then put each <recent-image> tag on its own
+ # line so we can scan it line-by-line.
+ $body =~ s/\n/ /gs;
+ $body =~ s/(<recent-image)\b/\n$1/gsi;
+
+ foreach (split (/\n/, $body)) {
+ next unless (m/^<recent-image\b/);
+ next unless (m/\bIMG=[\'\"]([^\'\"]+)[\'\"]/si);
+ my $img = html_unquote ($1);
+
+ next if ($lj_cache{$img}); # already have it
+
+ # Only cache entries that carry both an IMG and a URL attribute.
+ next unless (m/\bURL=[\'\"]([^\'\"]+)[\'\"]/si);
+ my $page = html_unquote ($1);
+ my @pair = ($img, $page);
+ LOG ($verbose_filter, " candidate: $img");
+ push @lj_cache, \@pair;
+ $lj_cache{$img} = \@pair;
+ }
+
+ return () if ($#lj_cache == -1);
+
+ # Select a random entry from the cache (not just from this fetch.)
+ my $n = $#lj_cache+1;
+ my $i = int(rand($n));
+ my ($img, $page) = @{$lj_cache[$i]};
+
+ # delete this one from @lj_cache and from %lj_cache.
+ #
+ @lj_cache = ( @lj_cache[0 .. $i-1],
+ @lj_cache[$i+1 .. $#lj_cache] );
+ delete $lj_cache{$img};
+
+ # Keep the size of the cache under the limit by nuking older entries
+ # (the fifo front is the oldest.)
+ #
+ while ($#lj_cache >= $lj_cache_size) {
+ my $pairP = shift @lj_cache;
+ my $img = $pairP->[0];
+ delete $lj_cache{$img};
+ }
+
+ LOG ($verbose_load, "picked image " .($i+1) . "/$n: $img");
+
+ return ($page, $img);
+}
+
+\f
+############################################################################
+#
+# Pick images from ircimages.com (images that have been in the /topic of
+# various IRC channels.)
+#
+############################################################################
+
+my $ircimages_url = "http://ircimages.com/";
+
+# ircimages -- load a random page of ircimages.com's archive and return
+# a random image URL from the anchors on it.
+#
+# Args:    $timeout -- seconds allowed for the HTTP transaction.
+# Returns: ($source_url, $img_url) -- both the same URL here -- or ()
+#          if the page could not be loaded or held no image links.
+#
+sub pick_from_ircimages {
+ my ( $timeout ) = @_;
+
+ $last_search = $ircimages_url; # for warnings
+
+ # The archive is paginated; pick a random page.
+ my $n = int(rand(2900));
+ my $search_url = $ircimages_url . "page-$n";
+
+ my ( $base, $body ) = get_document ($search_url, undef, $timeout);
+ return () unless $body;
+
+ my @candidates = ();
+
+ # Flatten the document, then put each <A> tag on its own line so we
+ # can scan it line-by-line.
+ $body =~ s/\n/ /gs;
+ $body =~ s/(<A)\b/\n$1/gsi;
+
+ foreach (split (/\n/, $body)) {
+
+ my ($u) = m@<A\s.*\bHREF\s*=\s*([^>]+)>@i;
+ next unless $u;
+
+ if ($u =~ m/^\"([^\"]*)\"/) { $u = $1; } # quoted string
+ elsif ($u =~ m/^([^\s]*)\s/) { $u = $1; } # or token
+
+ next unless ($u =~ m/^http:/i);
+
+ # Skip links back into the site (and its sibling) itself.
+ # (Bug fix: this used to read "searchirc\.com\|ircimages\.com" --
+ # the backslash made "|" a literal character rather than
+ # alternation, so neither host ever matched and nothing was
+ # filtered out.)
+ next if ($u =~ m@^http://(searchirc\.com|ircimages\.com)@i);
+
+ # Only accept links that end in an image-file extension.
+ next unless ($u =~ m@[.](gif|jpg|jpeg|pjpg|pjpeg|png)$@i);
+
+ LOG ($verbose_http, " HREF: $u");
+ push @candidates, $u;
+ }
+
+ LOG ($verbose_filter, ($#candidates+1) . " links on $search_url");
+
+ return () if ($#candidates == -1);
+
+ # Pick one candidate at random.
+ my $i = int(rand($#candidates+1));
+ my $img = $candidates[$i];
+
+ LOG ($verbose_load, "picked image " .($i+1) . "/" . ($#candidates+1) .
+ ": $img");
+
+ $search_url = $img; # hmm...
+ return ($search_url, $img);
+}
+
+\f
+############################################################################
+#
+# Pick images from Flickr's page of recently-posted photos.
+#
+############################################################################
+
+my $flickr_img_url = "http://www.flickr.com/photos/";
+
+# Like LiveJournal, the Flickr page of images tends to update slowly,
+# so remember the last N entries on it and randomly select from those.
+
+# I know that Flickr has an API (http://www.flickr.com/services/api/)
+# but it was easy enough to scrape the HTML, so I didn't bother exploring.
+
+my $flickr_cache_size = 1000;
+my @flickr_cache = (); # fifo, for ordering by age
+my %flickr_cache = (); # hash, for detecting dups
+
+
+# flickr -- scrape a random page of Flickr's recent photos, merge new
+# entries into the cache above, and return one cached [image, page] pair
+# chosen at random (removing it from the cache so it isn't repeated.)
+#
+# Args:    $timeout -- seconds allowed for the HTTP transaction.
+# Returns: ($page_url, $img_url), or () if the page could not be loaded
+#          and the cache is empty.
+#
+sub pick_from_flickr {
+ my ( $timeout ) = @_;
+
+ # Random 16-entry-aligned offset into the recent-photos listing.
+ my $start = 16 * int(rand(100));
+
+ $last_search = $flickr_img_url; # for warnings
+ $last_search .= "?start=$start" if ($start > 0);
+
+ my ( $base, $body ) = get_document ($last_search, undef, $timeout);
+ return () unless $body;
+
+ # Flatten the document, then put each <a> tag on its own line so we
+ # can scan it line-by-line.
+ $body =~ s/[\r\n]/ /gs;
+ $body =~ s/(<a)\b/\n$1/gsi;
+
+ my $count = 0;
+ my $count2 = 0;
+ foreach (split (/\n/, $body)) {
+ # Match an anchor that immediately wraps a thumbnail <IMG>.
+ my ($page, $thumb) = m@<A \s [^<>]* \b HREF=\"([^<>\"]+)\" [^<>]* > \s*
+ <IMG \s [^<>]* \b SRC=\"([^<>\"]+)\" @xsi;
+ next unless defined ($thumb);
+ $page = html_unquote ($page);
+ $thumb = html_unquote ($thumb);
+
+ # Only accept thumbnails hosted on Flickr's photo servers.
+ next unless ($thumb =~ m@^http://photos\d*\.flickr\.com/@);
+
+ # Absolutize root-relative URLs.
+ my $base = "http://www.flickr.com/";
+ $page =~ s@^/@$base@;
+ $thumb =~ s@^/@$base@;
+
+ my $img = $thumb;
+ $img =~ s/_[a-z](\.[a-z\d]+)$/$1/si; # take off "thumb" suffix
+
+ $count++;
+ next if ($flickr_cache{$img}); # already have it
+
+ # (The third element, $start, is carried in the pair but unused
+ # when an entry is later picked.)
+ my @pair = ($img, $page, $start);
+ LOG ($verbose_filter, " candidate: $img");
+ push @flickr_cache, \@pair;
+ $flickr_cache{$img} = \@pair;
+ $count2++;
+ }
+
+ return () if ($#flickr_cache == -1);
+
+ # Select a random entry from the cache (not just from this fetch.)
+ my $n = $#flickr_cache+1;
+ my $i = int(rand($n));
+ my ($img, $page) = @{$flickr_cache[$i]};
+
+ # delete this one from @flickr_cache and from %flickr_cache.
+ #
+ @flickr_cache = ( @flickr_cache[0 .. $i-1],
+ @flickr_cache[$i+1 .. $#flickr_cache] );
+ delete $flickr_cache{$img};
+
+ # Keep the size of the cache under the limit by nuking older entries
+ # (the fifo front is the oldest.)
+ #
+ while ($#flickr_cache >= $flickr_cache_size) {
+ my $pairP = shift @flickr_cache;
+ my $img = $pairP->[0];
+ delete $flickr_cache{$img};
+ }
+
+ LOG ($verbose_load, "picked image " .($i+1) . "/$n: $img");
+
+ return ($page, $img);
+}
+
+\f
+############################################################################
+#
+# Pick images by waiting for driftnet to populate a temp dir with files.
+# Requires driftnet version 0.1.5 or later.
+# (Driftnet is a program by Chris Lightfoot that sniffs your local ethernet
+# for images being downloaded by others.)
+# Driftnet/webcollage integration by jwz.
+#
+############################################################################
+
+# driftnet
+sub pick_from_driftnet {
+ my ( $timeout ) = @_;
+
+ my $id = $driftnet_magic;
+ my $dir = $driftnet_dir;
+ my $start = time;
+ my $now;
+
+ error ("\$driftnet_dir unset?") unless ($dir);
+ $dir =~ s@/+$@@;
+
+ error ("$dir unreadable") unless (-d "$dir/.");
+
+ $timeout = $http_timeout unless ($timeout);
+ $last_search = $id;
+
+ while ($now = time, $now < $start + $timeout) {
+ local *DIR;
+ opendir (DIR, $dir) || error ("$dir: $!");
+ while (my $file = readdir(DIR)) {
+ next if ($file =~ m/^\./);
+ $file = "$dir/$file";
+ closedir DIR;
+ LOG ($verbose_load, "picked file $file ($id)");
+ return ($id, $file);